Dataset schema (column name, type, and value statistics from the dataset viewer):

instance_id                string    lengths 10-57
patch                      string    lengths 261-37.7k
repo                       string    lengths 7-53
base_commit                string    length 40 (fixed)
hints_text                 string    301 distinct values
test_patch                 string    lengths 212-2.22M
problem_statement          string    lengths 23-37.7k
version                    int64     always 0
environment_setup_commit   string    89 distinct values
FAIL_TO_PASS               sequence  lengths 1-4.94k
PASS_TO_PASS               sequence  lengths 0-7.82k
meta                       dict
created_at                 unknown
license                    string    8 distinct values
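Each record below lists its field values one per line, in the schema order above (hints_text appears to be empty for these rows and is omitted). As a rough sketch of how records with this schema are typically consumed, assuming a Hugging Face `datasets`-style source; the dataset identifier below is a placeholder, not the real one:

```python
# Minimal sketch of iterating records with this schema. The dataset
# identifier is hypothetical and stands in for wherever this dump lives.
from datasets import load_dataset

ds = load_dataset("org/swe-task-instances", split="train")  # placeholder name
for row in ds:
    # Each record pairs a repository state (base_commit) with a gold patch
    # and the tests that must flip from failing to passing once applied.
    print(row["instance_id"], row["repo"], row["base_commit"][:8])
    print("  fail-to-pass:", row["FAIL_TO_PASS"])
```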
stuartmccoll__gitlab-changelog-generator-31
diff --git a/CHANGELOG.md b/CHANGELOG.md index 57d78f7..63bc117 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # CHANGELOG +## v1.0.6 - 2019/10/01 + +- Added a flag to disable SSL checking - [issue](https://github.com/stuartmccoll/gitlab-changelog-generator/issues/30) raised by [PhilLab](https://github.com/PhilLab). + ## v1.0.5 - 2019/06/01 - Amended the way date formatting is handled - [issue](https://github.com/stuartmccoll/gitlab-changelog-generator/issues/25) raised by [smutel](https://github.com/smutel), commit(s) contributed by [smutel](https://github.com/smutel). diff --git a/changelog_generator/calls.py b/changelog_generator/calls.py index 2546183..be30a21 100644 --- a/changelog_generator/calls.py +++ b/changelog_generator/calls.py @@ -24,6 +24,7 @@ def get_last_commit_date(cli_args: dict) -> str: if "token" in cli_args else None }, + verify=cli_args["ssl"], ) logger.info(response.status_code) response.raise_for_status() @@ -69,6 +70,7 @@ def get_closed_issues_for_project(cli_args: dict) -> dict: headers={"PRIVATE-TOKEN": cli_args["token"]} if "token" in cli_args else None, + verify=cli_args["ssl"], ) response.raise_for_status() except requests.exceptions.HTTPError as ex: @@ -103,6 +105,7 @@ def get_last_tagged_release_date(cli_args: dict) -> str: headers={"PRIVATE-TOKEN": cli_args["token"]} if "token" in cli_args else None, + verify=cli_args["ssl"], ) response.raise_for_status() except requests.exceptions.HTTPError as ex: @@ -138,6 +141,7 @@ def get_commits_since_date(date: str, cli_args: dict) -> list: headers={"PRIVATE-TOKEN": cli_args["token"]} if "token" in cli_args else None, + verify=cli_args["ssl"], ) response.raise_for_status() except requests.exceptions.HTTPError as ex: diff --git a/changelog_generator/entry_point.py b/changelog_generator/entry_point.py index e65de05..7c7d007 100644 --- a/changelog_generator/entry_point.py +++ b/changelog_generator/entry_point.py @@ -65,6 +65,15 @@ def process_arguments() -> dict: help="gitlab personal token for auth", required=False, ) + parser.add_argument( + "-s", + "--ssl", + dest="ssl", + help="specify whether or not to enable ssl", + required=False, + default=True, + type=lambda x: (str(x).lower() in ["false", "2", "no"]), + ) args = parser.parse_args() @@ -78,6 +87,7 @@ def process_arguments() -> dict: "version": args.version, "changelog": args.changelog, "token": args.token, + "ssl": args.ssl, } diff --git a/setup.py b/setup.py index dd09982..89e1dd2 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ with open("README.md", "r") as fh: setuptools.setup( name="gitlab-changelog-generator", - version="1.0.5", + version="1.0.6", author="Stuart McColl", author_email="[email protected]", description="A small command line utility for generating CHANGELOG.md "
stuartmccoll/gitlab-changelog-generator
bc7168731f834eaa8e89721490f58728c1291eeb
diff --git a/changelog_generator/tests/test_calls.py b/changelog_generator/tests/test_calls.py index 2147a0a..de881c1 100644 --- a/changelog_generator/tests/test_calls.py +++ b/changelog_generator/tests/test_calls.py @@ -29,6 +29,7 @@ class TestCalls(unittest.TestCase): "branch_two": "master", "version": "1", "changelog": "N", + "ssl": "True", } try: @@ -51,6 +52,7 @@ class TestCalls(unittest.TestCase): "branch_two": "master", "version": "1", "changelog": "N", + "ssl": "True", } commit_date = get_last_commit_date(cli_args) @@ -74,6 +76,7 @@ class TestCalls(unittest.TestCase): "branch_two": "master", "version": "1", "changelog": "N", + "ssl": "True", } try: @@ -103,6 +106,7 @@ class TestCalls(unittest.TestCase): "branch_two": "master", "version": "1", "changelog": "N", + "ssl": "True", } commits = get_commits_since_date( @@ -138,6 +142,7 @@ class TestCalls(unittest.TestCase): "branch_two": "master", "version": "1", "changelog": "N", + "ssl": "True", } self.assertEqual( @@ -165,6 +170,7 @@ class TestCalls(unittest.TestCase): "branch_two": "master", "version": "1", "changelog": "N", + "ssl": "True", } self.assertEqual( diff --git a/changelog_generator/tests/test_entry_point.py b/changelog_generator/tests/test_entry_point.py index c05877c..5aafc8b 100644 --- a/changelog_generator/tests/test_entry_point.py +++ b/changelog_generator/tests/test_entry_point.py @@ -32,6 +32,7 @@ class TestGenerator(unittest.TestCase): "version": "1.2.3", "changelog": "N", "token": "test-token", + "ssl": True, } result = process_arguments()
Add support for self-signed https certificates Follow-up of #23 : Now that SSL is supported, it would be awesome to have a flag for disabling the certificate check in case of a self-hosted, self-signed certificate (mostly when using an intranet instance) - https://stackoverflow.com/questions/15445981/how-do-i-disable-the-security-certificate-check-in-python-requests - maybe this answer explains the least intrusive patch: https://stackoverflow.com/a/50159273/1531708
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "changelog_generator/tests/test_entry_point.py::TestGenerator::test_process_arguments" ]
[ "changelog_generator/tests/test_calls.py::TestCalls::test_unsuccessful_commits_since_date", "changelog_generator/tests/test_calls.py::TestCalls::test_get_last_commit_date", "changelog_generator/tests/test_calls.py::TestCalls::test_get_closed_issues_for_project", "changelog_generator/tests/test_calls.py::TestCalls::test_unsuccessful_get_last_commit_date", "changelog_generator/tests/test_calls.py::TestCalls::test_get_last_tagged_release_date", "changelog_generator/tests/test_calls.py::TestCalls::test_commits_since_date" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-10-01T19:37:16Z"
mit
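The gold patch above threads the new `--ssl` flag into the `verify=` argument of each `requests.get` call, which is the standard requests switch for TLS certificate checking. A minimal sketch of that mechanism in isolation (the URL is a placeholder):

```python
# Sketch of the mechanism the patch relies on: requests' `verify` argument
# controls TLS certificate validation; verify=False skips it, which is what
# a self-hosted instance with a self-signed certificate needs.
import requests

def fetch(url: str, verify_ssl: bool = True) -> requests.Response:
    response = requests.get(url, verify=verify_ssl)
    response.raise_for_status()
    return response

# fetch("https://gitlab.intranet.example/api/v4/projects", verify_ssl=False)
```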
stummjr__flake8-scrapy-19
diff --git a/finders/oldstyle.py b/finders/oldstyle.py index 0910d97..dceef88 100644 --- a/finders/oldstyle.py +++ b/finders/oldstyle.py @@ -12,7 +12,7 @@ class UrlJoinIssueFinder(IssueFinder): return first_param = node.args[0] - if not isinstance(first_param, ast.Attribute): + if not isinstance(first_param, ast.Attribute) or not isinstance(first_param.value, ast.Name): return if first_param.value.id == 'response' and first_param.attr == 'url': diff --git a/flake8_scrapy.py b/flake8_scrapy.py index c1198f4..839b2b8 100644 --- a/flake8_scrapy.py +++ b/flake8_scrapy.py @@ -6,7 +6,7 @@ from finders.domains import ( from finders.oldstyle import OldSelectorIssueFinder, UrlJoinIssueFinder -__version__ = '0.0.1' +__version__ = '0.0.2' class ScrapyStyleIssueFinder(ast.NodeVisitor): diff --git a/setup.py b/setup.py index 4d058d5..3518539 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ with open('README.md', 'r') as f: setuptools.setup( name='flake8-scrapy', license='MIT', - version='0.0.1', + version='0.0.2', long_description=long_description, long_description_content_type='text/markdown', author='Valdir Stumm Junior',
stummjr/flake8-scrapy
e09bcf1e387b52d081d2df4b6d6c459203b31a5b
diff --git a/tests/test_oldstyle.py b/tests/test_oldstyle.py index 29e9f50..6dc2d34 100644 --- a/tests/test_oldstyle.py +++ b/tests/test_oldstyle.py @@ -17,6 +17,8 @@ def test_finds_old_style_urljoin(code): @pytest.mark.parametrize('code', [ ('response.urljoin("/foo")'), ('url = urljoin()'), + ('urljoin(x, "/foo")'), + ('urljoin(x.y.z, "/foo")'), ]) def test_dont_find_old_style_urljoin(code): issues = run_checker(code)
Failed SCP03 rule check When starting the fakehaven stage in CI, I received the following error: ``` $ flakeheaven lint --format=grouped --exit-zero --import-order-style pep8 --application-import-names directories multiprocessing.pool.RemoteTraceback: """ Traceback (most recent call last): File "/usr/local/lib/python3.7/multiprocessing/pool.py", line 121, in worker result = (True, func(*args, **kwds)) File "/usr/local/lib/python3.7/multiprocessing/pool.py", line 44, in mapstar return list(map(*args)) File "/usr/local/lib/python3.7/site-packages/flake8/checker.py", line 687, in _run_checks return checker.run_checks() File "/usr/local/lib/python3.7/site-packages/flakeheaven/_patched/_checkers.py", line 282, in run_checks return super().run_checks() File "/usr/local/lib/python3.7/site-packages/flake8/checker.py", line 597, in run_checks self.run_ast_checks() File "/usr/local/lib/python3.7/site-packages/flake8/checker.py", line 500, in run_ast_checks for (line_number, offset, text, _) in runner: File "/usr/local/lib/python3.7/site-packages/flake8_scrapy.py", line 55, in run finder.visit(self.tree) File "/usr/local/lib/python3.7/ast.py", line 271, in visit return visitor(node) File "/usr/local/lib/python3.7/ast.py", line 279, in generic_visit self.visit(item) File "/usr/local/lib/python3.7/ast.py", line 271, in visit return visitor(node) File "/usr/local/lib/python3.7/ast.py", line 279, in generic_visit self.visit(item) File "/usr/local/lib/python3.7/ast.py", line 271, in visit return visitor(node) File "/usr/local/lib/python3.7/site-packages/flake8_scrapy.py", line 38, in visit_Assign self.find_issues_visitor('Assign', node) File "/usr/local/lib/python3.7/site-packages/flake8_scrapy.py", line 35, in find_issues_visitor self.generic_visit(node) File "/usr/local/lib/python3.7/ast.py", line 281, in generic_visit self.visit(value) File "/usr/local/lib/python3.7/ast.py", line 271, in visit return visitor(node) File "/usr/local/lib/python3.7/site-packages/flake8_scrapy.py", line 41, in visit_Call self.find_issues_visitor('Call', node) File "/usr/local/lib/python3.7/site-packages/flake8_scrapy.py", line 35, in find_issues_visitor self.generic_visit(node) File "/usr/local/lib/python3.7/ast.py", line 279, in generic_visit self.visit(item) File "/usr/local/lib/python3.7/ast.py", line 271, in visit return visitor(node) File "/usr/local/lib/python3.7/ast.py", line 281, in generic_visit self.visit(value) File "/usr/local/lib/python3.7/ast.py", line 271, in visit return visitor(node) File "/usr/local/lib/python3.7/site-packages/flake8_scrapy.py", line 41, in visit_Call self.find_issues_visitor('Call', node) File "/usr/local/lib/python3.7/site-packages/flake8_scrapy.py", line 34, in find_issues_visitor self.issues.extend(list(issues)) File "/usr/local/lib/python3.7/site-packages/finders/oldstyle.py", line 18, in find_issues if first_param.value.id == 'response' and first_param.attr == 'url': AttributeError: 'Attribute' object has no attribute 'id' """ The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/usr/local/bin/flakeheaven", line 8, in <module> sys.exit(entrypoint()) File "/usr/local/lib/python3.7/site-packages/flakeheaven/_cli.py", line 40, in entrypoint exit_code, msg = main(argv) File "/usr/local/lib/python3.7/site-packages/flakeheaven/_cli.py", line 32, in main return COMMANDS[command_name](argv=argv[1:]) File "/usr/local/lib/python3.7/site-packages/flakeheaven/commands/_lint.py", line 12, in lint_command app.run(argv) File "/usr/local/lib/python3.7/site-packages/flake8/main/application.py", line 375, in run self._run(argv) File "/usr/local/lib/python3.7/site-packages/flake8/main/application.py", line 364, in _run self.run_checks() File "/usr/local/lib/python3.7/site-packages/flake8/main/application.py", line 271, in run_checks self.file_checker_manager.run() File "/usr/local/lib/python3.7/site-packages/flake8/checker.py", line 309, in run self.run_parallel() File "/usr/local/lib/python3.7/site-packages/flake8/checker.py", line 275, in run_parallel for ret in pool_map: File "/usr/local/lib/python3.7/multiprocessing/pool.py", line 354, in <genexpr> return (item for chunk in result for item in chunk) File "/usr/local/lib/python3.7/multiprocessing/pool.py", line 748, in next raise value AttributeError: 'Attribute' object has no attribute 'id' ``` The problem occurs in this line: ```python urljoin(settings.SERVICE_URLS.PD, '/path') ``` Where are the `settings`: ```python from pydantic import BaseSettings, BaseModel class ServiceUrlsSchema(BaseModel): PD: str class Settings(BaseSettings): SERVICE_URLS: ServiceUrlsSchema ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_oldstyle.py::test_dont_find_old_style_urljoin[urljoin(x.y.z," ]
[ "tests/test_oldstyle.py::test_finds_old_style_urljoin[urljoin(response.url,", "tests/test_oldstyle.py::test_finds_old_style_urljoin[url", "tests/test_oldstyle.py::test_dont_find_old_style_urljoin[response.urljoin(\"/foo\")]", "tests/test_oldstyle.py::test_dont_find_old_style_urljoin[url", "tests/test_oldstyle.py::test_dont_find_old_style_urljoin[urljoin(x,", "tests/test_oldstyle.py::test_find_old_style_selector[sel" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2022-03-29T17:45:55Z"
mit
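The traceback in the record above comes from assuming the first argument's `.value` is always an `ast.Name`; for a nested attribute like `x.y.z` it is another `ast.Attribute`, which has no `.id`. A small standalone reproduction of both AST shapes:

```python
# Why the extra isinstance guard is needed: in `urljoin(x.y.z, "/foo")`
# the first argument is an Attribute whose .value is another Attribute,
# not a Name, so accessing .value.id raises AttributeError.
import ast

simple = ast.parse('urljoin(response.url, "/foo")', mode="eval").body
nested = ast.parse('urljoin(x.y.z, "/foo")', mode="eval").body

print(type(simple.args[0].value).__name__)  # Name      -> has .id
print(type(nested.args[0].value).__name__)  # Attribute -> no .id
```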
suminb__base62-22
diff --git a/base62.py b/base62.py index df45c41..5017c43 100644 --- a/base62.py +++ b/base62.py @@ -16,24 +16,18 @@ CHARSET_DEFAULT = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxy CHARSET_INVERTED = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" -def encode(n, minlen=1, charset=CHARSET_DEFAULT): +def encode(n, charset=CHARSET_DEFAULT): """Encodes a given integer ``n``.""" chs = [] while n > 0: - r = n % BASE - n //= BASE + n, r = divmod(n, BASE) + chs.insert(0, charset[r]) - chs.append(charset[r]) + if not chs: + return "0" - if len(chs) > 0: - chs.reverse() - else: - chs.append("0") - - s = "".join(chs) - s = charset[0] * max(minlen - len(s), 0) + s - return s + return "".join(chs) def encodebytes(barray, charset=CHARSET_DEFAULT): @@ -45,7 +39,27 @@ def encodebytes(barray, charset=CHARSET_DEFAULT): """ _check_type(barray, bytes) - return encode(int.from_bytes(barray, "big"), charset=charset) + + # Count the number of leading zeros. + leading_zeros_count = 0 + for i in range(len(barray)): + if barray[i] != 0: + break + leading_zeros_count += 1 + + # Encode the leading zeros as "0" followed by a character indicating the count. + # This pattern may occur several times if there are many leading zeros. + n, r = divmod(leading_zeros_count, len(charset) - 1) + zero_padding = f"0{charset[-1]}" * n + if r: + zero_padding += f"0{charset[r]}" + + # Special case: the input is empty, or is entirely null bytes. + if leading_zeros_count == len(barray): + return zero_padding + + value = encode(int.from_bytes(barray, "big"), charset=charset) + return zero_padding + value def decode(encoded, charset=CHARSET_DEFAULT): @@ -56,9 +70,6 @@ def decode(encoded, charset=CHARSET_DEFAULT): """ _check_type(encoded, str) - if encoded.startswith("0z"): - encoded = encoded[2:] - l, i, v = len(encoded), 0, 0 for x in encoded: v += _value(x, charset=charset) * (BASE ** (l - (i + 1))) @@ -75,6 +86,11 @@ def decodebytes(encoded, charset=CHARSET_DEFAULT): :rtype: bytes """ + leading_null_bytes = b"" + while encoded.startswith("0") and len(encoded) >= 2: + leading_null_bytes += b"\x00" * _value(encoded[1], charset) + encoded = encoded[2:] + decoded = decode(encoded, charset=charset) buf = bytearray() while decoded > 0: @@ -82,7 +98,7 @@ def decodebytes(encoded, charset=CHARSET_DEFAULT): decoded //= 256 buf.reverse() - return bytes(buf) + return leading_null_bytes + bytes(buf) def _value(ch, charset):
suminb/base62
53b87a62e835ee6a3fb9fe3ec4999ce115161655
diff --git a/tests/test_basic.py b/tests/test_basic.py index 73f1d09..bade00f 100644 --- a/tests/test_basic.py +++ b/tests/test_basic.py @@ -4,7 +4,6 @@ import base62 bytes_int_pairs = [ - (b"\x00", 0), (b"\x01", 1), (b"\x01\x01", 0x0101), (b"\xff\xff", 0xFFFF), @@ -20,9 +19,6 @@ def test_const(): def test_basic(): assert base62.encode(0) == "0" - assert base62.encode(0, minlen=0) == "0" - assert base62.encode(0, minlen=1) == "0" - assert base62.encode(0, minlen=5) == "00000" assert base62.decode("0") == 0 assert base62.decode("0000") == 0 assert base62.decode("000001") == 1 @@ -30,19 +26,11 @@ def test_basic(): assert base62.encode(34441886726) == "base62" assert base62.decode("base62") == 34441886726 - # NOTE: For backward compatibility. When I first wrote this module in PHP, - # I used to use the `0z` prefix to denote a base62 encoded string (similar - # to `0x` for hexadecimal strings). - assert base62.decode("0zbase62") == 34441886726 - def test_basic_inverted(): kwargs = {"charset": base62.CHARSET_INVERTED} assert base62.encode(0, **kwargs) == "0" - assert base62.encode(0, minlen=0, **kwargs) == "0" - assert base62.encode(0, minlen=1, **kwargs) == "0" - assert base62.encode(0, minlen=5, **kwargs) == "00000" assert base62.decode("0", **kwargs) == 0 assert base62.decode("0000", **kwargs) == 0 assert base62.decode("000001", **kwargs) == 1 @@ -50,11 +38,6 @@ def test_basic_inverted(): assert base62.encode(10231951886, **kwargs) == "base62" assert base62.decode("base62", **kwargs) == 10231951886 - # NOTE: For backward compatibility. When I first wrote this module in PHP, - # I used to use the `0z` prefix to denote a base62 encoded string (similar - # to `0x` for hexadecimal strings). - assert base62.decode("0zbase62", **kwargs) == 10231951886 - @pytest.mark.parametrize("b, i", bytes_int_pairs) def test_bytes_to_int(b, i): @@ -77,7 +60,7 @@ def test_encodebytes_rtype(): assert isinstance(encoded, str) [email protected]("s", ["0", "1", "a", "z", "ykzvd7ga", "0z1234"]) [email protected]("s", ["0", "1", "a", "z", "ykzvd7ga"]) def test_decodebytes(s): assert int.from_bytes(base62.decodebytes(s), "big") == base62.decode(s) @@ -113,3 +96,23 @@ def test_invalid_alphabet(): def test_invalid_string(): with pytest.raises(TypeError): base62.encodebytes({}) + + [email protected]( + "input_bytes, expected_encoded_text", + ( + (b"", ""), + (b"\x00", "01"), + (b"\x00\x00", "02"), + (b"\x00\x01", "011"), + (b"\x00" * 61, "0z"), + (b"\x00" * 62, "0z01"), + ), +) +def test_leading_zeros(input_bytes, expected_encoded_text): + """Verify that leading null bytes are not lost.""" + + encoded_text = base62.encodebytes(input_bytes) + assert encoded_text == expected_encoded_text + output_bytes = base62.decodebytes(encoded_text) + assert output_bytes == input_bytes
Ignoring leading zero bytes Hello, first of all, thank you for this library. I am using it for encoding 16 byte blocks and I have noticed, that during encoding, leading bytes that are equal to `0x00` are ignored. This is due to conversion to integer, which the library internally does. I believe this is not a correct behavior, because without knowledge of the input bytes block length, you cannot reconstruct (decode) the original input from output. But for example in encryption (and many other areas), all bytes (incl. leading zero bytes) matter. I'll give an example using base64, which does this correctly: ``` encoded = b64encode(b'\x00\x00\x01').decode() print(encoded) decoded = b64decode(encoded) print(decoded) ``` This code yields: ``` AAAB b'\x00\x00\x01' ``` Now your library: ``` encoded = base62.encodebytes(b'\x00\x00\x01') print(encoded) decoded = base62.decodebytes(encoded) print(decoded) ``` Yields: ``` 1 b'\x01' ``` As you can see, decoded output is not equal the input (it misses the two leading zero bytes).
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_basic.py::test_leading_zeros[-]", "tests/test_basic.py::test_leading_zeros[\\x00-01]", "tests/test_basic.py::test_leading_zeros[\\x00\\x00-02]", "tests/test_basic.py::test_leading_zeros[\\x00\\x01-011]", "tests/test_basic.py::test_leading_zeros[\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00-0z]", "tests/test_basic.py::test_leading_zeros[\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00-0z01]" ]
[ "tests/test_basic.py::test_const", "tests/test_basic.py::test_basic", "tests/test_basic.py::test_basic_inverted", "tests/test_basic.py::test_bytes_to_int[\\x01-1]", "tests/test_basic.py::test_bytes_to_int[\\x01\\x01-257]", "tests/test_basic.py::test_bytes_to_int[\\xff\\xff-65535]", "tests/test_basic.py::test_bytes_to_int[\\x01\\x01\\x01-65793]", "tests/test_basic.py::test_bytes_to_int[\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08-72623859790382856]", "tests/test_basic.py::test_encodebytes[\\x01-1]", "tests/test_basic.py::test_encodebytes[\\x01\\x01-257]", "tests/test_basic.py::test_encodebytes[\\xff\\xff-65535]", "tests/test_basic.py::test_encodebytes[\\x01\\x01\\x01-65793]", "tests/test_basic.py::test_encodebytes[\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08-72623859790382856]", "tests/test_basic.py::test_encodebytes_type", "tests/test_basic.py::test_encodebytes_rtype", "tests/test_basic.py::test_decodebytes[0]", "tests/test_basic.py::test_decodebytes[1]", "tests/test_basic.py::test_decodebytes[a]", "tests/test_basic.py::test_decodebytes[z]", "tests/test_basic.py::test_decodebytes[ykzvd7ga]", "tests/test_basic.py::test_decodebytes_type", "tests/test_basic.py::test_decodebytes_rtype", "tests/test_basic.py::test_roundtrip[]", "tests/test_basic.py::test_roundtrip[0]", "tests/test_basic.py::test_roundtrip[bytes", "tests/test_basic.py::test_roundtrip[\\x01\\x00\\x80]", "tests/test_basic.py::test_invalid_alphabet", "tests/test_basic.py::test_invalid_string" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-01-25T14:33:04Z"
bsd-2-clause
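The fix above encodes each run of leading null bytes as a `0` marker followed by a charset character giving the run length, chunked at 61 (the charset size minus the `0` digit itself). A standalone sketch of just that padding arithmetic:

```python
# Sketch of the zero-padding scheme from the patch: a run of leading null
# bytes becomes "0" + charset[count], chunked at len(charset) - 1 = 61.
CHARSET = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

def zero_padding(leading_zeros: int, charset: str = CHARSET) -> str:
    full_chunks, remainder = divmod(leading_zeros, len(charset) - 1)
    padding = f"0{charset[-1]}" * full_chunks   # each full chunk encodes 61 zeros
    if remainder:
        padding += f"0{charset[remainder]}"
    return padding

assert zero_padding(1) == "01"     # one null byte
assert zero_padding(61) == "0z"    # exactly one full chunk
assert zero_padding(62) == "0z01"  # a full chunk plus one
```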
sunpy__ablog-97
diff --git a/ablog/post.py b/ablog/post.py index b6be6c8..e08dacc 100644 --- a/ablog/post.py +++ b/ablog/post.py @@ -709,7 +709,7 @@ def generate_atom_feeds(app): feed.title(feed_title) feed.link(href=url) feed.subtitle(blog.blog_feed_subtitle) - feed.link(href=feed_url) + feed.link(href=feed_url, rel="self") feed.language(app.config.language) feed.generator("ABlog", ablog.__version__, "https://ablog.readthedocs.org/") @@ -741,17 +741,18 @@ def generate_atom_feeds(app): # Entry values that support templates title = post.title - if post.excerpt: - summary = " ".join(paragraph.astext() for paragraph in post.excerpt[0]) - else: - summary = "" + summary = "".join(paragraph.astext() for paragraph in post.excerpt) template_values = {} for element in ("title", "summary", "content"): if element in feed_templates: template_values[element] = jinja2.Template(feed_templates[element]).render(**locals()) feed_entry.title(template_values.get("title", title)) - feed_entry.summary(template_values.get("summary", summary)) - feed_entry.content(content=template_values.get("content", content), type="html") + summary = template_values.get("summary", summary) + if summary: + feed_entry.summary(summary) + content = template_values.get("content", content) + if content: + feed_entry.content(content=content, type="html") parent_dir = os.path.dirname(feed_path) if not os.path.isdir(parent_dir):
sunpy/ablog
5e894ab7b2a6667eaac84a42b31d3c96b5028d34
diff --git a/tests/roots/test-build/foo-empty-post.rst b/tests/roots/test-build/foo-empty-post.rst new file mode 100644 index 0000000..e2221df --- /dev/null +++ b/tests/roots/test-build/foo-empty-post.rst @@ -0,0 +1,5 @@ +.. post:: 2021-03-23 + +############## +Foo Empty Post +############## diff --git a/tests/roots/test-build/post.rst b/tests/roots/test-build/post.rst index d8f1d1e..bef264d 100644 --- a/tests/roots/test-build/post.rst +++ b/tests/roots/test-build/post.rst @@ -4,6 +4,8 @@ Foo Post Title ============== - Foo post description. + Foo post description `with link`_. Foo post content. + +.. _`with link`: https://example.com diff --git a/tests/test_build.py b/tests/test_build.py index ff4211b..10c077b 100644 --- a/tests/test_build.py +++ b/tests/test_build.py @@ -26,13 +26,13 @@ def test_feed(app, status, warning): with feed_path.open() as feed_opened: feed_tree = lxml.etree.parse(feed_opened) entries = feed_tree.findall("{http://www.w3.org/2005/Atom}entry") - assert len(entries) == 1, "Wrong number of Atom feed entries" + assert len(entries) == 2, "Wrong number of Atom feed entries" entry = entries[0] title = entry.find("{http://www.w3.org/2005/Atom}title") assert title.text == "Foo Post Title", "Wrong Atom feed entry title" summary = entry.find("{http://www.w3.org/2005/Atom}summary") - assert summary.text == "Foo post description.", "Wrong Atom feed entry summary" + assert summary.text == "Foo post description with link.", "Wrong Atom feed entry summary" categories = entry.findall("{http://www.w3.org/2005/Atom}category") assert len(categories) == 2, "Wrong number of Atom feed categories" assert categories[0].attrib["label"] == "Foo Tag", "Wrong Atom feed first category" @@ -42,6 +42,16 @@ def test_feed(app, status, warning): content = entry.find("{http://www.w3.org/2005/Atom}content") assert "Foo post content." in content.text, "Wrong Atom feed entry content" + empty_entry = entries[1] + title = empty_entry.find("{http://www.w3.org/2005/Atom}title") + assert title.text == "Foo Empty Post", "Wrong Atom feed empty entry title" + summary = empty_entry.find("{http://www.w3.org/2005/Atom}summary") + assert summary is None, "Atom feed empty entry contains optional summary element" + categories = empty_entry.findall("{http://www.w3.org/2005/Atom}category") + assert len(categories) == 0, "Atom categories rendered for empty post" + content = empty_entry.find("{http://www.w3.org/2005/Atom}content") + assert 'id="foo-empty-post"' in content.text, "Atom feed empty entry missing post ID" + social_path = app.outdir / "blog/social.xml" assert (social_path).exists(), "Social media feed was not built" @@ -54,7 +64,7 @@ def test_feed(app, status, warning): title = social_entry.find("{http://www.w3.org/2005/Atom}title") assert title.text == "Foo Post Title", "Wrong Social media feed entry title" summary = social_entry.find("{http://www.w3.org/2005/Atom}summary") - assert summary.text == "Foo post description.", "Wrong Social media feed entry summary" + assert summary.text == "Foo post description with link.", "Wrong Social media feed entry summary" categories = social_entry.findall("{http://www.w3.org/2005/Atom}category") assert len(categories) == 2, "Wrong number of Social media feed categories" assert categories[0].attrib["label"] == "Foo Tag", "Wrong Social media feed first category"
Atom feeds fail W3C validation ### Atom feeds fail W3C validation The generated Atom feeds don't pass W3C validation ### Expected vs Actual behavior Expected behavior: Generated Atom feeds pass W3C valdation Actual behavior: Generated Atom feeds fail W3C valdation ``` This feed does not validate. line 9, column 2: Duplicate alternate links with the same type and hreflang [help] <entry> ^ In addition, interoperability with the widest range of feed readers could be improved by implementing the following recommendation. line 2, column 0: Missing atom:link with rel="self" [help] <feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en"> ``` Also, the `blog_baseurl` value needs to have a trailing slash or the validator produces a 3rd error: ``` In addition, interoperability with the widest range of feed readers could be improved by implementing the following recommendations. line 3, column 32: Identifier "https://www.rpatterson.net" is not in canonical form (the canonical form would be "https://www.rpatterson.net/") [help] <id>https://www.rpatterson.net</id> ``` I greppd that adding the trailing slash doesn't result in double slashes in the generated site. So the requirement for a trailing slash should be documented or better yet should be added when the site is built/generated. ### Steps to Reproduce 1. Create an Atom blog 2. Publish where publicly available 3. [Validate an Atom feed](https://validator.w3.org/feed/check.cgi?url=https%3A%2F%2Fwww.rpatterson.net%2Fblog%2Fatom.xml) ### System Details ``` $ ./.venv/bin/python --version Python 3.8.6 $ ./.venv/bin/pip freeze ablog==0.10.13 alabaster==0.7.12 Babel==2.9.0 certifi==2020.12.5 chardet==4.0.0 docutils==0.16 feedgen==0.9.0 idna==2.10 imagesize==1.2.0 invoke==1.5.0 Jinja2==2.11.3 lxml==4.6.2 MarkupSafe==1.1.1 packaging==20.9 Pygments==2.8.1 pygments-solarized==0.0.3 pyparsing==2.4.7 python-dateutil==2.8.1 pytz==2021.1 requests==2.25.1 six==1.15.0 snowballstemmer==2.1.0 Sphinx==3.5.2 sphinx-fasvg==0.1.4 sphinx-nervproject-theme==2.0.3 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==1.0.3 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.4 urllib3==1.26.4 watchdog==2.0.2 $ uname -a Linux rpatterson.rpatterson.net 5.8.0-7642-generic #47~1614007149~20.10~82fb226-Ubuntu SMP Tue Feb 23 02 :59:01 UTC x86_64 x86_64 x86_64 GNU/Linux ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_build.py::test_feed" ]
[ "tests/test_build.py::test_build" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_issue_reference" ], "has_test_patch": true, "is_lite": false }
"2021-03-23T03:26:01Z"
bsd-2-clause
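The one-line fix marks the feed's own URL with `rel="self"`, the element the W3C validator flags as missing; leaving `rel` off produced a second `alternate` link that collided with the first. A minimal feedgen sketch of the corrected pattern (URLs are placeholders):

```python
# Sketch of the rel="self" convention the patch adopts: one alternate link
# pointing at the HTML page, one rel="self" link pointing at the feed itself.
from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.id("https://example.com/blog/")  # note the trailing slash for canonical form
fg.title("Example Blog")
fg.link(href="https://example.com/blog/")                      # the HTML page
fg.link(href="https://example.com/blog/atom.xml", rel="self")  # the feed URL
print(fg.atom_str(pretty=True).decode())
```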
sunpy__ndcube-433
diff --git a/changelog/433.trivial.rst b/changelog/433.trivial.rst new file mode 100644 index 0000000..3ae9cab --- /dev/null +++ b/changelog/433.trivial.rst @@ -0,0 +1,1 @@ +Adds a function to compare the physical types of two WCS objects. diff --git a/ndcube/utils/wcs.py b/ndcube/utils/wcs.py index 25a0e47..228fc24 100644 --- a/ndcube/utils/wcs.py +++ b/ndcube/utils/wcs.py @@ -8,7 +8,7 @@ import numbers from collections import UserDict import numpy as np -from astropy.wcs.wcsapi import low_level_api +from astropy.wcs.wcsapi import BaseHighLevelWCS, BaseLowLevelWCS, low_level_api __all__ = ['array_indices_for_world_objects', 'convert_between_array_and_pixel_axes', 'calculate_world_indices_from_axes', 'wcs_ivoa_mapping', @@ -429,3 +429,51 @@ def array_indices_for_world_objects(wcs, axes=None): array_index = convert_between_array_and_pixel_axes(pixel_index, wcs.pixel_n_dim) array_indices[oinds] = tuple(array_index[::-1]) # Invert from pixel order to array order return tuple(ai for ai in array_indices if ai) + + +def get_low_level_wcs(wcs, name='wcs'): + """ + Returns a low level WCS object from a low level or high level WCS. + + Parameters + ---------- + wcs: `astropy.wcs.wcsapi.BaseHighLevelWCS` or `astropy.wcs.wcsapi.BaseLowLevelWCS` + The input WCS for getting the low level WCS object. + + name: `str`, optional + Any name for the wcs to be used in the exception that could be raised. + + Returns + ------- + wcs: `astropy.wcs.wcsapi.BaseLowLevelWCS` + """ + + if isinstance(wcs, BaseHighLevelWCS): + return wcs.low_level_wcs + elif isinstance(wcs, BaseLowLevelWCS): + return wcs + else: + raise(f'{name} must implement either BaseHighLevelWCS or BaseLowLevelWCS') + + +def compare_wcs_physical_types(source_wcs, target_wcs): + """ + Checks to see if two WCS objects have the same physical types in the same order. + + Parameters + ---------- + source_wcs : `astropy.wcs.wcsapi.BaseHighLevelWCS` or `astropy.wcs.wcsapi.BaseLowLevelWCS` + The WCS which is currently in use, usually `self.wcs`. + + target_wcs : `astropy.wcs.wcsapi.BaseHighLevelWCS` or `astropy.wcs.wcsapi.BaseLowLevelWCS` + The WCS object on which the NDCube is to be reprojected. + + Returns + ------- + result : `bool` + """ + + source_wcs = get_low_level_wcs(source_wcs, 'source_wcs') + target_wcs = get_low_level_wcs(target_wcs, 'target_wcs') + + return source_wcs.world_axis_physical_types == target_wcs.world_axis_physical_types
sunpy/ndcube
ba1a436d404e0b569a84cf90c59fd4aa3cef1f39
diff --git a/ndcube/utils/tests/test_utils_wcs.py b/ndcube/utils/tests/test_utils_wcs.py index 9361bea..6d811a8 100644 --- a/ndcube/utils/tests/test_utils_wcs.py +++ b/ndcube/utils/tests/test_utils_wcs.py @@ -157,3 +157,8 @@ def test_array_indices_for_world_objects_2(wcs_4d_lt_t_l_ln): array_indices = utils.wcs.array_indices_for_world_objects(wcs_4d_lt_t_l_ln, ('lon', 'time')) assert len(array_indices) == 2 assert array_indices == ((0, 3), (2,)) + + +def test_compare_wcs_physical_types(wcs_4d_t_l_lt_ln, wcs_3d_l_lt_ln): + assert utils.wcs.compare_wcs_physical_types(wcs_4d_t_l_lt_ln, wcs_4d_t_l_lt_ln) is True + assert utils.wcs.compare_wcs_physical_types(wcs_4d_t_l_lt_ln, wcs_3d_l_lt_ln) is False
Validate WCS Function <!-- We know asking good questions takes effort, and we appreciate your time. Thank you. Please be aware that everyone has to follow our code of conduct: https://sunpy.org/coc These comments are hidden when you submit this github issue. Please have a search on our GitHub repository to see if a similar issue has already been posted. If a similar issue is closed, have a quick look to see if you are satisfied by the resolution. If not please go ahead and open an issue! --> ### Description <!-- Provide a general description of the feature you would like. If you prefer, you can also suggest a draft design or API. --> Create a function that accepts two WCS objects, `target_wcs` and `initial_wcs`, and verifies that `target` is the same/equivalent to `initial`. (Better variable names here are welcome.) Initially, the `target_wcs` must describe all axes of the `initial_wcs`. Future versions of this function should handle invariant axes. The use case for this function is as part of verifying the input to the future `NDCube.resample` method.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "ndcube/utils/tests/test_utils_wcs.py::test_compare_wcs_physical_types" ]
[ "ndcube/utils/tests/test_utils_wcs.py::test_convert_between_array_and_pixel_axes", "ndcube/utils/tests/test_utils_wcs.py::test_pixel_axis_to_world_axes", "ndcube/utils/tests/test_utils_wcs.py::test_world_axis_to_pixel_axes", "ndcube/utils/tests/test_utils_wcs.py::test_pixel_axis_to_physical_types", "ndcube/utils/tests/test_utils_wcs.py::test_physical_type_to_pixel_axes", "ndcube/utils/tests/test_utils_wcs.py::test_physical_type_to_world_axis[wl-2]", "ndcube/utils/tests/test_utils_wcs.py::test_physical_type_to_world_axis[em.wl-2]", "ndcube/utils/tests/test_utils_wcs.py::test_get_dependent_pixel_axes", "ndcube/utils/tests/test_utils_wcs.py::test_get_dependent_array_axes", "ndcube/utils/tests/test_utils_wcs.py::test_get_dependent_world_axes", "ndcube/utils/tests/test_utils_wcs.py::test_get_dependent_physical_types", "ndcube/utils/tests/test_utils_wcs.py::test_array_indices_for_world_objects", "ndcube/utils/tests/test_utils_wcs.py::test_array_indices_for_world_objects_2" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files" ], "has_test_patch": true, "is_lite": false }
"2021-06-22T04:59:03Z"
bsd-2-clause
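The utility added above reduces to comparing `world_axis_physical_types` on the two low-level WCS objects, order included. A usage sketch with plain astropy WCS objects (the CTYPE values are chosen for illustration):

```python
# Sketch of the comparison compare_wcs_physical_types performs: two WCSes
# match only if their world_axis_physical_types agree in content and order.
from astropy.wcs import WCS

w1 = WCS(naxis=2)
w1.wcs.ctype = ["HPLN-TAN", "HPLT-TAN"]

w2 = WCS(naxis=2)
w2.wcs.ctype = ["HPLT-TAN", "HPLN-TAN"]  # same axes, swapped order

print(w1.world_axis_physical_types == w1.world_axis_physical_types)  # True
print(w1.world_axis_physical_types == w2.world_axis_physical_types)  # False
```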
sunpy__sunpy-6011
diff --git a/.pep8speaks.yml b/.pep8speaks.yml deleted file mode 100644 index f845da350..000000000 --- a/.pep8speaks.yml +++ /dev/null @@ -1,26 +0,0 @@ -scanner: - diff_only: False - -pycodestyle: - max-line-length: 100 - exclude: - - setup.py - - docs/conf.py - - sunpy/__init__.py - - sunpy/extern/ - ignore: - - E226 - - E501 - - W503 - - W504 - -descending_issues_order: True - -message: - opened: - header: "Hello @{name}! Thanks for opening this PR. " - footer: "Do see the [Hitchhiker's guide to code style](https://goo.gl/hqbW4r)" - updated: - header: "Hello @{name}! Thanks for updating this PR. " - footer: "" - no_errors: "There are currently no PEP 8 issues detected in this Pull Request. Cheers!" diff --git a/MANIFEST.in b/MANIFEST.in index d10ccd251..9dc53df20 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,24 +1,21 @@ -# Exclude specific files -# All files which are tracked by git and not explicitly excluded here are included by setuptools_scm exclude .codecov.yaml -exclude .mailmap exclude .editorconfig exclude .gitattributes exclude .gitignore -exclude .pep8speaks.yml +exclude .mailmap exclude .pre-commit-config.yaml +exclude .readthedocs.yaml exclude .test_package_pins.txt exclude .zenodo.json -exclude azure-pipelines.yml +exclude asv.conf.json +exclude CITATION.cff exclude readthedocs.yml -exclude rtd-environment.yml -# Prune folders + prune .circleci prune .github prune .jupyter +prune benchmarks prune binder prune changelog prune tools -# This subpackage is only used in development checkouts and should not be -# included in built tarballs prune sunpy/_dev diff --git a/changelog/6011.bugfix.rst b/changelog/6011.bugfix.rst new file mode 100644 index 000000000..e5f317c01 --- /dev/null +++ b/changelog/6011.bugfix.rst @@ -0,0 +1,1 @@ +Fixed `.system_info` so it returns the extra group when an optional dependency is missing. diff --git a/pyproject.toml b/pyproject.toml index d7ee23eec..5e28f304f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [build-system] requires = [ - "setuptools", - "setuptools_scm", + "setuptools>=56,!=61.0.0", + "setuptools_scm[toml]>=6.2", "wheel", "oldest-supported-numpy", "extension-helpers" ] diff --git a/sunpy/util/sysinfo.py b/sunpy/util/sysinfo.py index 8bb5d1c7e..1bdcf390f 100644 --- a/sunpy/util/sysinfo.py +++ b/sunpy/util/sysinfo.py @@ -5,6 +5,7 @@ import platform from collections import defaultdict from importlib.metadata import PackageNotFoundError, version, requires, distribution +from packaging.markers import Marker from packaging.requirements import Requirement import sunpy.extern.distro as distro @@ -25,20 +26,53 @@ def get_requirements(package): ------- `dict` A dictionary of requirements with keys being the extra requirement group names. + The values are a nested dictionary with keys being the package names and + values being the `packaging.requirements.Requirement` objects. """ requirements: list = requires(package) - requires_dict = defaultdict(list) + requires_dict = defaultdict(dict) for requirement in requirements: req = Requirement(requirement) package_name, package_marker = req.name, req.marker if package_marker and "extra ==" in str(package_marker): group = str(package_marker).split("extra == ")[1].strip('"').strip("'").strip() - requires_dict[group].append(package_name) else: - requires_dict["required"].append(package_name) + group = "required" + # De-duplicate (the same package could appear more than once in the extra == 'all' group) + if package_name in requires_dict[group]: + continue + requires_dict[group][package_name] = req return requires_dict +def resolve_requirement_versions(package_versions): + """ + Resolves a list of requirements for the same package. + + Given a list of package details in the form of `packaging.requirements.Requirement` + objects, combine the specifier, extras, url and marker information to create + a new requirement object. + """ + resolved = Requirement(str(package_versions[0])) + + for package_version in package_versions[1:]: + resolved.specifier = resolved.specifier & package_version.specifier + resolved.extras = resolved.extras.union(package_version.extras) + resolved.url = resolved.url or package_version.url + if resolved.marker and package_version.marker: + resolved.marker = Marker(f"{resolved.marker} or {package_version.marker}") + elif package_version.marker: + resolved.marker = package_version.marker + + return resolved + + +def format_requirement_string(requirement): + formatted_string = f"Missing {requirement}" + formatted_string = formatted_string.replace("or extra ==", "or").strip() + return formatted_string + + def find_dependencies(package="sunpy", extras=None): """ List installed and missing dependencies. @@ -49,17 +83,20 @@ def find_dependencies(package="sunpy", extras=None): """ requirements = get_requirements(package) installed_requirements = {} - missing_requirements = {} + missing_requirements = defaultdict(list) extras = extras or ["required"] for group in requirements: if group not in extras: continue - for package in requirements[group]: + for package, package_details in requirements[group].items(): try: package_version = version(package) installed_requirements[package] = package_version except PackageNotFoundError: - missing_requirements[package] = f"Missing {package}" + missing_requirements[package].append(package_details) + for package, package_versions in missing_requirements.items(): + missing_requirements[package] = format_requirement_string( + resolve_requirement_versions(package_versions)) return missing_requirements, installed_requirements @@ -80,13 +117,27 @@ def missing_dependencies_by_extra(package="sunpy", exclude_extras=None): return missing_dependencies +def get_extra_groups(groups, exclude_extras): + return list(set(groups) - set(exclude_extras)) + + +def get_keys_list(dictionary, sort=True): + keys = [*dictionary.keys()] + if sort: + return sorted(keys) + return keys + + def system_info(): """ Prints ones' system info in an "attractive" fashion. """ - base_reqs = get_requirements("sunpy")["required"] - extra_reqs = get_requirements("sunpy")["all"] - missing_packages, installed_packages = find_dependencies(package="sunpy", extras=["required", "all"]) + requirements = get_requirements("sunpy") + groups = get_keys_list(requirements) + extra_groups = get_extra_groups(groups, ['all', 'dev']) + base_reqs = get_keys_list(requirements['required']) + extra_reqs = get_keys_list(requirements['all']) + missing_packages, installed_packages = find_dependencies(package="sunpy", extras=extra_groups) extra_prop = {"System": platform.system(), "Arch": f"{platform.architecture()[0]}, ({platform.processor()})", "Python": platform.python_version(), diff --git a/tox.ini b/tox.ini index 1fd3b3e5a..5815c0fc9 100644 --- a/tox.ini +++ b/tox.ini @@ -5,7 +5,7 @@ envlist = codestyle base_deps requires = - setuptools >= 30.3.0 + setuptools >=56, !=61.0.0 pip >= 19.3.1 tox-pypi-filter >= 0.12 isolated_build = true
sunpy/sunpy
d33a7306add389e7c51c6fa5ee14b7fb0cbee869
diff --git a/sunpy/util/tests/test_sysinfo.py b/sunpy/util/tests/test_sysinfo.py index ad3ea7eeb..bba6c2ce5 100644 --- a/sunpy/util/tests/test_sysinfo.py +++ b/sunpy/util/tests/test_sysinfo.py @@ -1,4 +1,12 @@ -from sunpy.util.sysinfo import find_dependencies, missing_dependencies_by_extra, system_info +from packaging.requirements import Requirement + +from sunpy.util.sysinfo import ( + find_dependencies, + format_requirement_string, + missing_dependencies_by_extra, + resolve_requirement_versions, + system_info, +) def test_find_dependencies(): @@ -17,8 +25,19 @@ def test_find_dependencies(): def test_missing_dependencies_by_extra(): missing = missing_dependencies_by_extra() - assert sorted(list(missing.keys())) == sorted(['all', 'asdf', 'required', 'dask', 'database', 'dev', 'docs', - 'image', 'jpeg2000', 'map', 'net', 'tests', 'timeseries', + assert sorted(list(missing.keys())) == sorted(['all', + 'asdf', + 'required', + 'dask', + 'database', + 'dev', + 'docs', + 'image', + 'jpeg2000', + 'map', + 'net', + 'tests', + 'timeseries', 'visualization']) missing = missing_dependencies_by_extra(exclude_extras=["all"]) assert sorted(list(missing.keys())) == sorted(['asdf', 'required', 'dask', 'database', 'dev', 'docs', @@ -26,6 +45,35 @@ def test_missing_dependencies_by_extra(): 'visualization']) +def test_resolve_requirement_versions(): + package1 = Requirement('test-package[ext1]>=1.1.1; extra == "group1"') + package2 = Requirement('test-package[ext2]<=2.0.0; extra == "group2"') + assert str(resolve_requirement_versions([package1, package2])) == str(Requirement( + 'test-package[ext1,ext2]<=2.0.0,>=1.1.1; extra == "group1" or extra == "group2"')) + + package3 = Requirement('test-package==1.1.0; extra == "group3"') + package4 = Requirement('test-package==1.1.0; extra == "group4"') + assert str(resolve_requirement_versions([package3, package4])) == str( + Requirement('test-package==1.1.0; extra == "group3" or extra == "group4"')) + + package5 = Requirement('test-package; extra == "group5"') + package6 = Requirement('test-package[ext3]@https://foo.com') + assert str(resolve_requirement_versions([package5, package6])) == str( + Requirement('test-package[ext3]@ https://foo.com ; extra == "group5"')) + + +def test_format_requirement_string(): + package1 = Requirement('test-package[ext1]>=1.1.1; extra == "group1"') + assert format_requirement_string(package1) == 'Missing test-package[ext1]>=1.1.1; extra == "group1"' + + package2 = Requirement('test-package>=1.1.1; extra == "group1" or extra == "group2" or extra == "group3"') + assert format_requirement_string( + package2) == 'Missing test-package>=1.1.1; extra == "group1" or "group2" or "group3"' + + package3 = Requirement('test-package>=1.1.1') + assert format_requirement_string(package3) == 'Missing test-package>=1.1.1' + + def test_system_info(capsys): system_info() captured = capsys.readouterr()
system_info shows that all optional dependancies are part of the extra all rather than the more specific one ``` >>> import sunpy >>> sunpy.system_info() ============================== sunpy Installation Information ============================== General ####### OS: Arch Linux (rolling, Linux 5.16.14-arch1-1) Arch: 64bit, () sunpy: 3.1.5 Installation path: /home/stuart/.virtualenvs/sunpy-minimal/lib/python3.10/site-packages Required Dependencies ##################### parfive: 1.5.1 numpy: 1.22.3 astropy: 5.0.2 packaging: 21.3 Optional Dependencies ##################### asdf: Missing, need asdf>=2.6.0; extra == "all" beautifulsoup4: Missing, need beautifulsoup4>=4.8.0; extra == "all" cdflib: Missing, need cdflib!=0.4.0,>=0.3.19; extra == "all" dask: Missing, need dask[array]>=2.0.0; extra == "all" drms: Missing, need drms>=0.6.1; extra == "all" glymur: Missing, need glymur!=0.9.0,!=0.9.5,>=0.8.18; extra == "all" h5netcdf: Missing, need h5netcdf>=0.8.1; extra == "all" h5py: Missing, need h5py>=3.1.0; extra == "all" matplotlib: Missing, need matplotlib>=3.2.0; extra == "all" mpl-animators: Missing, need mpl-animators>=1.0.0; extra == "all" pandas: Missing, need pandas>=1.0.0; extra == "all" python-dateutil: Missing, need python-dateutil>=2.8.0; extra == "all" reproject: Missing, need reproject; extra == "all" scikit-image: Missing, need scikit-image<0.19,>=0.16.0; extra == "all" scipy: Missing, need scipy>=1.3.0; extra == "all" sqlalchemy: Missing, need sqlalchemy>=1.3.4; extra == "all" tqdm: 4.63.0 zeep: Missing, need zeep>=3.4.0; extra == "all" ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "sunpy/util/tests/test_sysinfo.py::test_find_dependencies", "sunpy/util/tests/test_sysinfo.py::test_missing_dependencies_by_extra", "sunpy/util/tests/test_sysinfo.py::test_resolve_requirement_versions", "sunpy/util/tests/test_sysinfo.py::test_format_requirement_string", "sunpy/util/tests/test_sysinfo.py::test_system_info" ]
[]
{ "failed_lite_validators": [ "has_added_files", "has_removed_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2022-03-25T03:25:07Z"
bsd-2-clause
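The heart of the fix above is `resolve_requirement_versions`, which merges several `Requirement` objects for the same package. The `packaging` operations it leans on can be exercised directly:

```python
# The merging primitives used by the fix: specifier sets intersect with `&`,
# extras union as sets, and environment markers can be or-combined as strings.
from packaging.markers import Marker
from packaging.requirements import Requirement

a = Requirement('test-package[ext1]>=1.1.1; extra == "group1"')
b = Requirement('test-package[ext2]<=2.0.0; extra == "group2"')

specifier = a.specifier & b.specifier         # <=2.0.0,>=1.1.1
extras = a.extras | b.extras                  # {'ext1', 'ext2'}
marker = Marker(f"{a.marker} or {b.marker}")  # extra == "group1" or extra == "group2"

print(specifier, sorted(extras), marker, sep="\n")
```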
sunpy__sunpy-6597
diff --git a/changelog/6597.bugfix.rst b/changelog/6597.bugfix.rst new file mode 100644 index 000000000..17fb0e881 --- /dev/null +++ b/changelog/6597.bugfix.rst @@ -0,0 +1,1 @@ +Fixed the incorrect calculation in :func:`~sunpy.map.header_helper.make_fitswcs_header` of the rotation matrix from a rotation angle when the pixels are non-square. diff --git a/sunpy/map/header_helper.py b/sunpy/map/header_helper.py index f899b83eb..65faa7908 100644 --- a/sunpy/map/header_helper.py +++ b/sunpy/map/header_helper.py @@ -169,7 +169,7 @@ def _set_rotation_params(meta_wcs, rotation_angle, rotation_matrix): rotation_angle = 0 * u.deg if rotation_angle is not None: - lam = meta_wcs['cdelt1'] / meta_wcs['cdelt2'] + lam = meta_wcs['cdelt2'] / meta_wcs['cdelt1'] p = np.deg2rad(rotation_angle) rotation_matrix = np.array([[np.cos(p), -1 * lam * np.sin(p)],
sunpy/sunpy
f5bf0674f7e53df5cd6010f545ff0414bca17090
diff --git a/sunpy/map/tests/test_header_helper.py b/sunpy/map/tests/test_header_helper.py index fefd01816..1a3d5efe5 100644 --- a/sunpy/map/tests/test_header_helper.py +++ b/sunpy/map/tests/test_header_helper.py @@ -66,7 +66,14 @@ def test_metakeywords(): assert isinstance(meta, dict) -def test_deafult_rotation(map_data, hpc_coord): +def test_scale_conversion(map_data, hpc_coord): + # The header will have cunit1/2 of arcsec + header = make_fitswcs_header(map_data, hpc_coord, scale=[1, 2] * u.arcmin / u.pix) + assert header['cdelt1'] == 60 + assert header['cdelt2'] == 120 + + +def test_default_rotation(map_data, hpc_coord): header = make_fitswcs_header(map_data, hpc_coord) wcs = WCS(header) np.testing.assert_allclose(wcs.wcs.pc, [[1, 0], [0, 1]], atol=1e-5) @@ -79,6 +86,13 @@ def test_rotation_angle(map_data, hpc_coord): np.testing.assert_allclose(wcs.wcs.pc, [[0, -1], [1, 0]], atol=1e-5) +def test_rotation_angle_rectangular_pixels(map_data, hpc_coord): + header = make_fitswcs_header(map_data, hpc_coord, scale=[2, 5] * u.arcsec / u.pix, + rotation_angle=45*u.deg) + wcs = WCS(header) + np.testing.assert_allclose(wcs.wcs.pc, np.sqrt(0.5) * np.array([[1, -2.5], [0.4, 1]]), atol=1e-5) + + def test_rotation_matrix(map_data, hpc_coord): header = make_fitswcs_header(map_data, hpc_coord, rotation_matrix=np.array([[1, 0], [0, 1]]))
Header helper is bugged when creating a PCij matrix from a rotation angle for rectangular pixels As [noted on the mailing list](https://groups.google.com/d/msgid/sunpy/0352a093-e23d-4681-8113-e560bd2be92an%40googlegroups.com), the calculation in the header helper of a PCij matrix from a rotation angle is incorrect when working with rectangular pixels (`CDELT1` not equal to `CDELT2`) because `lam` is the inverse of what it should be: https://github.com/sunpy/sunpy/blob/1fa82dd5e39282ab392ae1b8ac4ad3c66b6d65da/sunpy/map/header_helper.py#L156 The analogous bug was fixed in `GenericMap` in #5766, but was missed in the header helper.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "sunpy/map/tests/test_header_helper.py::test_rotation_angle_rectangular_pixels" ]
[ "sunpy/map/tests/test_header_helper.py::test_metakeywords", "sunpy/map/tests/test_header_helper.py::test_scale_conversion", "sunpy/map/tests/test_header_helper.py::test_default_rotation", "sunpy/map/tests/test_header_helper.py::test_rotation_angle", "sunpy/map/tests/test_header_helper.py::test_rotation_matrix", "sunpy/map/tests/test_header_helper.py::test_hpc_header", "sunpy/map/tests/test_header_helper.py::test_hgc_header", "sunpy/map/tests/test_header_helper.py::test_hgs_header", "sunpy/map/tests/test_header_helper.py::test_instrument_keyword", "sunpy/map/tests/test_header_helper.py::test_quantity_input", "sunpy/map/tests/test_header_helper.py::test_invalid_inputs", "sunpy/map/tests/test_header_helper.py::test_make_heliographic_header[CAR-shape0-carrington]", "sunpy/map/tests/test_header_helper.py::test_make_heliographic_header[CAR-shape0-stonyhurst]", "sunpy/map/tests/test_header_helper.py::test_make_heliographic_header[CAR-shape1-carrington]", "sunpy/map/tests/test_header_helper.py::test_make_heliographic_header[CAR-shape1-stonyhurst]", "sunpy/map/tests/test_header_helper.py::test_make_heliographic_header[CEA-shape0-carrington]", "sunpy/map/tests/test_header_helper.py::test_make_heliographic_header[CEA-shape0-stonyhurst]", "sunpy/map/tests/test_header_helper.py::test_make_heliographic_header[CEA-shape1-carrington]", "sunpy/map/tests/test_header_helper.py::test_make_heliographic_header[CEA-shape1-stonyhurst]", "sunpy/map/tests/test_header_helper.py::test_make_heliographic_header_invalid_inputs" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files" ], "has_test_patch": true, "is_lite": false }
"2022-11-11T20:13:04Z"
bsd-2-clause
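Written out, the relationship the one-line fix restores between the pixel scales, the rotation angle rho, and the PC matrix is:

```latex
\lambda = \frac{\mathrm{CDELT2}}{\mathrm{CDELT1}}, \qquad
\mathrm{PC} =
\begin{pmatrix}
\cos\rho & -\lambda \sin\rho \\
\lambda^{-1} \sin\rho & \cos\rho
\end{pmatrix}
```

For the new test's scale of [2, 5] arcsec/pix and rho = 45 degrees, this gives lambda = 5/2 = 2.5 and PC = sqrt(1/2) * [[1, -2.5], [0.4, 1]], exactly the matrix the test asserts.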
sunpy__sunpy-soar-24
diff --git a/sunpy_soar/client.py b/sunpy_soar/client.py index a2a50f4..968dcef 100644 --- a/sunpy_soar/client.py +++ b/sunpy_soar/client.py @@ -31,19 +31,14 @@ class SOARClient(BaseClient): return qrt @staticmethod - def _do_search(query): + def _construct_url(query): """ - Query the SOAR server with a single query. + Construct search URL. Parameters ---------- query : list[str] List of query items. - - Returns - ------- - astropy.table.QTable - Query results. """ base_url = ('http://soar.esac.esa.int/soar-sl-tap/tap/' 'sync?REQUEST=doQuery&') @@ -63,7 +58,24 @@ class SOARClient(BaseClient): request_str = [f'{item}={request_dict[item]}' for item in request_dict] request_str = '&'.join(request_str) - url = base_url + request_str + return base_url + request_str + + @staticmethod + def _do_search(query): + """ + Query the SOAR server with a single query. + + Parameters + ---------- + query : list[str] + List of query items. + + Returns + ------- + astropy.table.QTable + Query results. + """ + url = SOARClient._construct_url(query) log.debug(f'Getting request from URL: {url}') # Get request info r = requests.get(url) @@ -113,7 +125,7 @@ class SOARClient(BaseClient): for row in query_results: url = base_url + row['Data item ID'] - filepath = str(path).format(file=row['Filename']) + filepath = str(path).format(file=row['Filename'], **row.response_block_map) log.debug(f'Queing URL: {url}') downloader.enqueue_file(url, filename=filepath)
sunpy/sunpy-soar
fc28b4201b90f6e1a3c99b9cee0f8a3c8b7e6d07
diff --git a/sunpy_soar/tests/test_sunpy_soar.py b/sunpy_soar/tests/test_sunpy_soar.py index 45e87f9..a93fef7 100644 --- a/sunpy_soar/tests/test_sunpy_soar.py +++ b/sunpy_soar/tests/test_sunpy_soar.py @@ -70,3 +70,16 @@ def test_no_instrument(): time = a.Time('2020-04-16', '2020-04-17') res = SOARClient().search(time) assert len(res) == 50 + + +def test_download_path(tmp_path): + # Check that we can download things to a custom path using + # the search parameters + id = a.Instrument('EUI') + time = a.Time('2021-02-01', '2021-02-02') + level = a.Level(1) + res = Fido.search(id & time & level) + files = Fido.fetch(res[0, 0], path=tmp_path / '{instrument}') + assert len(files) == 1 + for f in files: + assert 'EUI' in f
SOAR client does not correctly handle string interpolation in `Fido.fetch` When using the `path=...` keyword argument in `Fido.fetch`, string interpolation can be used to direct certain files to certain directories. However, when used with the SOAR client, this throws an exception. This is demonstrated with the following code example: ```python import sunpy_soar from sunpy_soar.attrs import Product from sunpy.net import Fido, attrs as a result = Fido.search(a.Time("2021-02-12 16:0", "2021-02-13 02:00") & a.Level(2) & a.Instrument('EUI') & Product('EUI-FSI174-IMAGE')) files = Fido.fetch(result, path='{instrument}') ``` throws the following exception ```python traceback --------------------------------------------------------------------------- KeyError Traceback (most recent call last) Input In [16], in <cell line: 1>() ----> 1 files = Fido.fetch(result[0][:1], path='data/{instrument}') File ~/anaconda/envs/pyhc-summer-school/lib/python3.9/site-packages/sunpy/net/fido_factory.py:430, in UnifiedDownloaderFactory.fetch(self, path, max_conn, progress, overwrite, downloader, *query_results, **kwargs) 427 raise ValueError(f"Query result has an unrecognized type: {type(query_result)} " 428 "Allowed types are QueryResponseRow, QueryResponseTable or UnifiedResponse.") 429 for block in responses: --> 430 result = block.client.fetch(block, path=path, 431 downloader=downloader, 432 wait=False, **kwargs) 433 if result not in (NotImplemented, None): 434 reslist.append(result) File ~/anaconda/envs/pyhc-summer-school/lib/python3.9/site-packages/sunpy_soar/client.py:118, in SOARClient.fetch(self, query_results, path, downloader, **kwargs) 116 for row in query_results: 117 url = base_url + row['Data item ID'] --> 118 filepath = str(path).format(file=row['Filename']) 119 log.debug(f'Queing URL: {url}') 120 downloader.enqueue_file(url, filename=filepath) KeyError: 'instrument' ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "sunpy_soar/tests/test_sunpy_soar.py::test_download_path" ]
[ "sunpy_soar/tests/test_sunpy_soar.py::test_deprecated_identifier", "sunpy_soar/tests/test_sunpy_soar.py::test_no_results" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2022-05-12T19:46:18Z"
bsd-2-clause
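The fix works because `str.format` raises `KeyError` for any placeholder not supplied as a keyword argument; splatting the row's metadata mapping in with `**` provides the missing keys. A toy reproduction (the field values are illustrative):

```python
# Toy reproduction of the bug and the fix: format() must receive every
# placeholder named in the path template, so the row's metadata mapping
# is expanded alongside the filename.
path_template = "{instrument}/{file}"
row_map = {"instrument": "EUI", "level": 1}  # illustrative row metadata

try:
    path_template.format(file="solo_eui.fits")   # only `file` supplied
except KeyError as err:
    print("missing placeholder:", err)           # the reported bug

print(path_template.format(file="solo_eui.fits", **row_map))  # EUI/solo_eui.fits
```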
supakeen__pinnwand-234
diff --git a/requirements.txt b/requirements.txt index 232a10f..639a635 100644 --- a/requirements.txt +++ b/requirements.txt @@ -428,9 +428,9 @@ iniconfig==2.0.0 \ isort==5.13.2 \ --hash=sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109 \ --hash=sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6 -jinja2==3.1.2 \ - --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ - --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 +jinja2==3.1.3 \ + --hash=sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa \ + --hash=sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90 license-expression==30.1.1 \ --hash=sha256:42375df653ad85e6f5b4b0385138b2dbea1f5d66360783d8625c3e4f97f11f0c \ --hash=sha256:8d7e5e2de0d04fc104a4f952c440e8f08a5ba63480a0dad015b294770b7e58ec diff --git a/src/pinnwand/http.py b/src/pinnwand/app.py similarity index 100% rename from src/pinnwand/http.py rename to src/pinnwand/app.py diff --git a/src/pinnwand/command.py b/src/pinnwand/command.py index 969056a..dc9ec9a 100644 --- a/src/pinnwand/command.py +++ b/src/pinnwand/command.py @@ -82,7 +82,7 @@ def main(verbose: int, configuration_path: Optional[str]) -> None: def http(port: int, debug: bool) -> None: """Run pinnwand's HTTP server.""" from pinnwand import utility - from pinnwand.http import make_application + from pinnwand.app import make_application # Reap expired pastes on startup (we might've been shut down for a while) utility.reap()
supakeen/pinnwand
9c5049e1a9ff5f5998917dac2d5ff1ea0cff9200
diff --git a/test/conftest.py b/test/conftest.py
index e79684f..9c4ed31 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -36,7 +36,7 @@ def pytest_runtest_makereport(item, call):
     outcome = yield
     screen_file = str(screenshot_dir / f"{slugify(item.nodeid)}.png")
     report = outcome.get_result()
-    extra = getattr(report, "extra", [])
+    extras = getattr(report, "extras", [])
     if report.when == "call":
         if report.failed:
             if "page" in item.funcargs:
@@ -44,8 +44,8 @@ def pytest_runtest_makereport(item, call):
                 make_screenshot(item, page)
         xfail = hasattr(report, "wasxfail")
         if (report.skipped and xfail) or (report.failed and not xfail):
-            extra.append(pytest_html.extras.png(re.sub("test\W*e2e\W*report\W*", "", screen_file)))
-            report.extra = extra
+            extras.append(pytest_html.extras.png(re.sub(r"test\W*e2e\W*report\W*", "", screen_file)))
+            report.extras = extras
 
 
 def make_screenshot(item, page):
diff --git a/test/integration/test_http_api.py b/test/integration/test_http_api.py
index 9b8df48..0f03f81 100644
--- a/test/integration/test_http_api.py
+++ b/test/integration/test_http_api.py
@@ -10,7 +10,7 @@ configuration.ratelimit["read"]["capacity"] = 2**64 - 1
 configuration.ratelimit["create"]["capacity"] = 2**64 - 1
 configuration.ratelimit["delete"]["capacity"] = 2**64 - 1
 
-from pinnwand import configuration, database, http, utility
+from pinnwand import configuration, database, app, utility
 
 
 class DeprecatedAPITestCase(tornado.testing.AsyncHTTPTestCase):
@@ -19,7 +19,7 @@ class DeprecatedAPITestCase(tornado.testing.AsyncHTTPTestCase):
         database.Base.metadata.create_all(database._engine)
 
     def get_app(self) -> tornado.web.Application:
-        return http.make_application()
+        return app.make_application()
 
     def test_api_new(self) -> None:
         response = self.fetch(
@@ -291,7 +291,7 @@ class APIv1TestCase(tornado.testing.AsyncHTTPTestCase):
         database.Base.metadata.create_all(database._engine)
 
     def get_app(self) -> tornado.web.Application:
-        return http.make_application()
+        return app.make_application()
 
     def test_api_new(self) -> None:
         response = self.fetch(
diff --git a/test/integration/test_http_curl.py b/test/integration/test_http_curl.py
index 08d0707..514fc08 100644
--- a/test/integration/test_http_curl.py
+++ b/test/integration/test_http_curl.py
@@ -10,7 +10,7 @@ configuration.ratelimit["read"]["capacity"] = 2**64 - 1
 configuration.ratelimit["create"]["capacity"] = 2**64 - 1
 configuration.ratelimit["delete"]["capacity"] = 2**64 - 1
 
-from pinnwand import database, http
+from pinnwand import database, app
 
 
 class CurlTestCase(tornado.testing.AsyncHTTPTestCase):
@@ -19,7 +19,7 @@ class CurlTestCase(tornado.testing.AsyncHTTPTestCase):
         database.Base.metadata.create_all(database._engine)
 
     def get_app(self) -> tornado.web.Application:
-        return http.make_application()
+        return app.make_application()
 
     def test_curl_post_no_lexer(self) -> None:
         response = self.fetch(
diff --git a/test/integration/test_http_website.py b/test/integration/test_http_website.py
index ffb2460..82cfb0e 100644
--- a/test/integration/test_http_website.py
+++ b/test/integration/test_http_website.py
@@ -9,7 +9,7 @@ configuration.ratelimit["read"]["capacity"] = 2**64 - 1
 configuration.ratelimit["create"]["capacity"] = 2**64 - 1
 configuration.ratelimit["delete"]["capacity"] = 2**64 - 1
 
-from pinnwand import database, http
+from pinnwand import database, app
 
 
 class WebsiteTestCase(tornado.testing.AsyncHTTPTestCase):
@@ -18,7 +18,7 @@ class WebsiteTestCase(tornado.testing.AsyncHTTPTestCase):
         database.Base.metadata.create_all(database._engine)
 
     def get_app(self) -> tornado.web.Application:
-        return http.make_application()
+        return app.make_application()
 
     def test_website_index(self) -> None:
         response = self.fetch(
@@ -402,7 +402,7 @@ class DeprecatedWebsiteTestCase(tornado.testing.AsyncHTTPTestCase):
         database.Base.metadata.create_all(database._engine)
 
     def get_app(self) -> tornado.web.Application:
-        return http.make_application()
+        return app.make_application()
 
     def test_website_index_post_no_lexer(self) -> None:
         response = self.fetch(
ModuleNotFoundError: No module named 'http.cookies'; 'http' is not a package

This error seems to happen particularly in `PyCharm` when I try to make a new config to run the `pinnwand` package with specific attributes to target the different CLI commands. It first drove me crazy, but the issue comes from a name conflict with the built-in Python `http` library. Debugging is a lot easier with `PyCharm`, at least to me. Would you be up for renaming the `http` module to `app`, since what it ultimately does is build the app instance that is later run?
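For readers unfamiliar with this failure mode, here is a minimal sketch of how a local `http.py` shadows the standard library; the layout below is illustrative, not the exact PyCharm configuration from the report:

```python
# Hypothetical layout that reproduces the clash:
#   src/pinnwand/http.py      <- local module shadows the stdlib `http` package
#   src/pinnwand/command.py
#
# When src/pinnwand/ ends up first on sys.path (as a run configuration can
# arrange), `import http` resolves to the local file. That file is a module,
# not a package, so `from http.cookies import SimpleCookie` fails with the
# ModuleNotFoundError quoted above.
import importlib.util

spec = importlib.util.find_spec("http")
print(spec.origin)  # reveals which `http` actually won the import lookup
```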
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_get_expiries", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_get_lexers", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_empty_code", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_invalid_lexer", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_large_file", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_no_code", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_no_expiry", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_no_lexer", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_small_file", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_space_code", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_wrong_method", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_remove", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_return_filename", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_show", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_show_nonexistent", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_show_spaced", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_show_web", "test/integration/test_http_api.py::APIv1TestCase::test_api_detail_many_files", "test/integration/test_http_api.py::APIv1TestCase::test_api_new", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_invalid_body", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_invalid_lexer", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_large_file", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_many_file", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_many_file_large", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_no_content", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_no_expiry", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_no_files", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_no_lexer", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_small_file", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_wrong_method", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post_empty_raw", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post_no_expiry", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post_no_lexer", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post_no_raw", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post_nonexistent_expiry", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post_nonexistent_lexer", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post_spaced_raw", "test/integration/test_http_curl.py::CurlTestCase::test_curl_raw", "test/integration/test_http_curl.py::CurlTestCase::test_curl_raw_spaced", "test/integration/test_http_curl.py::CurlTestCase::test_curl_remove", "test/integration/test_http_curl.py::CurlTestCase::test_curl_show", "test/integration/test_http_website.py::WebsiteTestCase::test_website_about", 
"test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_empty_filenames", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_empty_lexers", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_empty_raws", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_many", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_many_too_large", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_mismatched", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_multiple", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_nonexistent_expiry", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_nonexistent_lexer", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_nothing", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_only_filenames", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_only_lexers", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_only_raws", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_only_xsrf", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_raw_only_space", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_single", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_too_large", "test/integration/test_http_website.py::WebsiteTestCase::test_website_expiry", "test/integration/test_http_website.py::WebsiteTestCase::test_website_hex_nonexistent_paste", "test/integration/test_http_website.py::WebsiteTestCase::test_website_index", "test/integration/test_http_website.py::WebsiteTestCase::test_website_index_with_lexer", "test/integration/test_http_website.py::WebsiteTestCase::test_website_index_with_nonexistent_lexer", "test/integration/test_http_website.py::WebsiteTestCase::test_website_nonexistent_page", "test/integration/test_http_website.py::WebsiteTestCase::test_website_raw_nonexistent_paste", "test/integration/test_http_website.py::WebsiteTestCase::test_website_removal", "test/integration/test_http_website.py::WebsiteTestCase::test_website_show_nonexistent_paste", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_download", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_download_nonexistent_paste", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_hex", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_hex_nonexistent_paste", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_index_post", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_index_post_empty_code", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_index_post_no_code", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_index_post_no_expiry", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_index_post_no_lexer", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_index_post_nonexistent_expiry", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_index_post_nonexistent_lexer", 
"test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_logo", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_raw", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_remove", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_remove_nonexistent_paste", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_repaste", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_repaste_nonexistent_paste", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_show" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2024-01-11T13:33:42Z"
mit
supakeen__pinnwand-261
diff --git a/pdm.lock b/pdm.lock index 8dd8de3..75291bf 100644 --- a/pdm.lock +++ b/pdm.lock @@ -5,7 +5,7 @@ groups = ["default", "dev"] strategy = ["cross_platform"] lock_version = "4.4.1" -content_hash = "sha256:a1d2e245c8350af379963adad03a3a7d34992b3baf4f5302faf3588b6dc977fc" +content_hash = "sha256:ed8c7d415898ea8f15e65023b6c8d43be31aef16a0536d278f33bfa43911c699" [[package]] name = "bandit" @@ -25,7 +25,7 @@ files = [ [[package]] name = "black" -version = "24.2.0" +version = "24.3.0" requires_python = ">=3.8" summary = "The uncompromising code formatter." dependencies = [ @@ -38,28 +38,28 @@ dependencies = [ "typing-extensions>=4.0.1; python_version < \"3.11\"", ] files = [ - {file = "black-24.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29"}, - {file = "black-24.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430"}, - {file = "black-24.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f"}, - {file = "black-24.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a"}, - {file = "black-24.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd"}, - {file = "black-24.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2"}, - {file = "black-24.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92"}, - {file = "black-24.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23"}, - {file = "black-24.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b"}, - {file = "black-24.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9"}, - {file = "black-24.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693"}, - {file = "black-24.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982"}, - {file = "black-24.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4"}, - {file = "black-24.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218"}, - {file = "black-24.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0"}, - {file = "black-24.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d"}, - {file = "black-24.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8"}, - {file = "black-24.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8"}, - {file = "black-24.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540"}, - {file = "black-24.2.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31"}, - {file = "black-24.2.0-py3-none-any.whl", hash = "sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6"}, - {file = "black-24.2.0.tar.gz", hash = "sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894"}, + {file = "black-24.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395"}, + {file = "black-24.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995"}, + {file = "black-24.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7"}, + {file = "black-24.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0"}, + {file = "black-24.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9"}, + {file = "black-24.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597"}, + {file = "black-24.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d"}, + {file = "black-24.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5"}, + {file = "black-24.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f"}, + {file = "black-24.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11"}, + {file = "black-24.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4"}, + {file = "black-24.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5"}, + {file = "black-24.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:79dcf34b33e38ed1b17434693763301d7ccbd1c5860674a8f871bd15139e7837"}, + {file = "black-24.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e19cb1c6365fd6dc38a6eae2dcb691d7d83935c10215aef8e6c38edee3f77abd"}, + {file = "black-24.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b76c275e4c1c5ce6e9870911384bff5ca31ab63d19c76811cb1fb162678213"}, + {file = "black-24.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:b5991d523eee14756f3c8d5df5231550ae8993e2286b8014e2fdea7156ed0959"}, + {file = "black-24.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c45f8dff244b3c431b36e3224b6be4a127c6aca780853574c00faf99258041eb"}, + {file = "black-24.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6905238a754ceb7788a73f02b45637d820b2f5478b20fec82ea865e4f5d4d9f7"}, + {file = "black-24.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7de8d330763c66663661a1ffd432274a2f92f07feeddd89ffd085b5744f85e7"}, + {file = "black-24.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:7bb041dca0d784697af4646d3b62ba4a6b028276ae878e53f6b4f74ddd6db99f"}, + {file = "black-24.3.0-py3-none-any.whl", hash = "sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93"}, + {file = "black-24.3.0.tar.gz", hash = "sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f"}, ] [[package]] @@ -1160,6 +1160,7 @@ files = [ {file = 
"PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, diff --git a/pyproject.toml b/pyproject.toml index 49d15cd..071a2d4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,7 +29,7 @@ license = {text = "MIT"} dev = [ "pytest>=7.3.1", "coverage>=7.2.7", - "black>=23.3.0", + "black>=24.3.0", "pytest-cov>=4.1.0", "pre-commit>=3.3.2", "mypy>=1.3.0", diff --git a/requirements.txt b/requirements.txt index da92629..533f74b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,29 +4,29 @@ bandit==1.7.8 \ --hash=sha256:36de50f720856ab24a24dbaa5fee2c66050ed97c1477e0a1159deab1775eab6b \ --hash=sha256:509f7af645bc0cd8fd4587abc1a038fc795636671ee8204d502b933aee44f381 -black==24.2.0 \ - --hash=sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8 \ - --hash=sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8 \ - --hash=sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd \ - --hash=sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9 \ - --hash=sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31 \ - --hash=sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92 \ - --hash=sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f \ - --hash=sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29 \ - --hash=sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4 \ - --hash=sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693 \ - --hash=sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218 \ - --hash=sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a \ - --hash=sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23 \ - --hash=sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0 \ - --hash=sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982 \ - --hash=sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894 \ - --hash=sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540 \ - --hash=sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430 \ - --hash=sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b \ - --hash=sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2 \ - --hash=sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6 \ - --hash=sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d +black==24.3.0 \ + 
--hash=sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f \ + --hash=sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93 \ + --hash=sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11 \ + --hash=sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0 \ + --hash=sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9 \ + --hash=sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5 \ + --hash=sha256:65b76c275e4c1c5ce6e9870911384bff5ca31ab63d19c76811cb1fb162678213 \ + --hash=sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d \ + --hash=sha256:6905238a754ceb7788a73f02b45637d820b2f5478b20fec82ea865e4f5d4d9f7 \ + --hash=sha256:79dcf34b33e38ed1b17434693763301d7ccbd1c5860674a8f871bd15139e7837 \ + --hash=sha256:7bb041dca0d784697af4646d3b62ba4a6b028276ae878e53f6b4f74ddd6db99f \ + --hash=sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395 \ + --hash=sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995 \ + --hash=sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f \ + --hash=sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597 \ + --hash=sha256:b5991d523eee14756f3c8d5df5231550ae8993e2286b8014e2fdea7156ed0959 \ + --hash=sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5 \ + --hash=sha256:c45f8dff244b3c431b36e3224b6be4a127c6aca780853574c00faf99258041eb \ + --hash=sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4 \ + --hash=sha256:d7de8d330763c66663661a1ffd432274a2f92f07feeddd89ffd085b5744f85e7 \ + --hash=sha256:e19cb1c6365fd6dc38a6eae2dcb691d7d83935c10215aef8e6c38edee3f77abd \ + --hash=sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7 boolean-py==4.0 \ --hash=sha256:17b9a181630e43dde1851d42bef546d616d5d9b4480357514597e78b203d06e4 \ --hash=sha256:2876f2051d7d6394a531d82dc6eb407faa0b01a0a0b3083817ccd7323b8d96bd diff --git a/src/pinnwand/defensive.py b/src/pinnwand/defensive.py index 0b64d53..4a5ebd5 100644 --- a/src/pinnwand/defensive.py +++ b/src/pinnwand/defensive.py @@ -1,6 +1,5 @@ -import ipaddress import re -from typing import Dict, Union +from typing import Dict from functools import wraps import token_bucket @@ -13,18 +12,10 @@ from pinnwand.configuration import Configuration, ConfigurationProvider log = logger.get_logger(__name__) -ratelimit_area: Dict[ - str, - Dict[ - Union[ipaddress.IPv4Address, ipaddress.IPv6Address], - token_bucket.Limiter, - ], -] = {} +ratelimit_area: Dict[str, token_bucket.Limiter] = {} -def should_be_ratelimited( - request: HTTPServerRequest, area: str = "global" -) -> bool: +def should_be_ratelimited(ip_address: str, area: str = "global") -> bool: """Test if a requesting IP is ratelimited for a certain area. Areas are different functionalities of the website, for example 'view' or 'input' to differentiate between creating new pastes (low volume) or high volume @@ -38,20 +29,17 @@ def should_be_ratelimited( configuration: Configuration = ConfigurationProvider.get_config() if area not in ratelimit_area: - ratelimit_area[area] = {} - - # TODO handle valueerror as validationerror? 
- address = ipaddress.ip_address(str(request.remote_ip)) - - if address not in ratelimit_area[area]: - ratelimit_area[area][address] = token_bucket.Limiter( + ratelimit_area[area] = token_bucket.Limiter( configuration.ratelimit[area]["refill"], configuration.ratelimit[area]["capacity"], token_bucket.MemoryStorage(), ) - if not ratelimit_area[area][address].consume(1): - log.warning("%s hit rate limit for %r", address, area) + if not ratelimit_area[area].consume( + ip_address.encode("utf-8"), + configuration.ratelimit[area]["consume"], + ): + log.warning("%s hit rate limit for %r", ip_address, area) return True return False @@ -63,7 +51,7 @@ def ratelimit(area: str): def wrapper(func): @wraps(func) def inner(request_handler: RequestHandler, *args, **kwargs): - if should_be_ratelimited(request_handler.request, area): + if should_be_ratelimited(request_handler.request.remote_ip, area): raise error.RatelimitError() return func(request_handler, *args, **kwargs) diff --git a/src/pinnwand/handler/api_curl.py b/src/pinnwand/handler/api_curl.py index 1fca553..1cf569e 100644 --- a/src/pinnwand/handler/api_curl.py +++ b/src/pinnwand/handler/api_curl.py @@ -31,7 +31,6 @@ class Create(tornado.web.RequestHandler): @defensive.ratelimit(area="create") def post(self) -> None: - configuration: Configuration = ConfigurationProvider.get_config() lexer = self.get_body_argument("lexer", "text") raw = self.get_body_argument("raw", "", strip=False)
supakeen/pinnwand
97525e394f263dd6aaa56da6ba090e262d536631
diff --git a/test/integration/test_http_api.py b/test/integration/test_http_api.py index 47b6dd5..672e8ac 100644 --- a/test/integration/test_http_api.py +++ b/test/integration/test_http_api.py @@ -1,20 +1,23 @@ import json +import copy import urllib.parse - +import unittest.mock import tornado.testing import tornado.web from pinnwand.configuration import Configuration, ConfigurationProvider +from pinnwand import app, utility +from pinnwand.database import manager, utils as database_utils + configuration: Configuration = ConfigurationProvider.get_config() -configuration._ratelimit["read"]["capacity"] = 2**64 - 1 -configuration._ratelimit["create"]["capacity"] = 2**64 - 1 -configuration._ratelimit["delete"]["capacity"] = 2**64 - 1 -from pinnwand import app, utility -from pinnwand.database import manager, utils as database_utils +ratelimit_copy = copy.deepcopy(configuration._ratelimit) +for area in ("read", "create", "delete"): + ratelimit_copy[area]["capacity"] = 2**64 - 1 [email protected](configuration._ratelimit, ratelimit_copy) class DeprecatedAPITestCase(tornado.testing.AsyncHTTPTestCase): def setUp(self) -> None: super().setUp() @@ -117,7 +120,6 @@ class DeprecatedAPITestCase(tornado.testing.AsyncHTTPTestCase): ), ) - print(response.body) assert response.code == 200 def test_api_new_large_file(self) -> None: diff --git a/test/integration/test_http_curl.py b/test/integration/test_http_curl.py index d648b74..489e23d 100644 --- a/test/integration/test_http_curl.py +++ b/test/integration/test_http_curl.py @@ -1,20 +1,22 @@ import re import urllib.parse - +import unittest.mock import tornado.testing import tornado.web - +import copy from pinnwand.configuration import Configuration, ConfigurationProvider -configuration: Configuration = ConfigurationProvider.get_config() - -configuration._ratelimit["read"]["capacity"] = 2**64 - 1 -configuration._ratelimit["create"]["capacity"] = 2**64 - 1 -configuration._ratelimit["delete"]["capacity"] = 2**64 - 1 from pinnwand import app from pinnwand.database import manager, utils as database_utils +configuration: Configuration = ConfigurationProvider.get_config() +ratelimit_copy = copy.deepcopy(configuration._ratelimit) +for area in ("read", "create", "delete"): + ratelimit_copy[area]["capacity"] = 2**64 - 1 + + [email protected](configuration._ratelimit, ratelimit_copy) class CurlTestCase(tornado.testing.AsyncHTTPTestCase): def setUp(self) -> None: super().setUp() @@ -131,7 +133,6 @@ class CurlTestCase(tornado.testing.AsyncHTTPTestCase): .group(1) # type: ignore .decode("ascii") ) - print(paste) paste = urllib.parse.urlparse(paste).path response = self.fetch( @@ -157,8 +158,6 @@ class CurlTestCase(tornado.testing.AsyncHTTPTestCase): ) paste = urllib.parse.urlparse(paste).path - print(repr(paste)) - response = self.fetch( paste, method="GET", @@ -262,8 +261,6 @@ class CurlTestCase(tornado.testing.AsyncHTTPTestCase): follow_redirects=False, ) - print(response.body) - paste = ( re.search(b"Paste URL: (.*)", response.body) .group(1) # type: ignore diff --git a/test/integration/test_http_ratelimit.py b/test/integration/test_http_ratelimit.py new file mode 100644 index 0000000..ce3e60b --- /dev/null +++ b/test/integration/test_http_ratelimit.py @@ -0,0 +1,90 @@ +import copy +import time +import unittest.mock + +import tornado.testing +import tornado.web + +from pinnwand import app, configuration, defensive + + +class RateLimitTestCase(tornado.testing.AsyncHTTPTestCase): + + def get_app(self) -> tornado.web.Application: + return app.make_application() + + def 
test_ratelimit_verification_on_endpoints(self): + with unittest.mock.patch("pinnwand.defensive.should_be_ratelimited") as patch: + patch.return_value = False + + self.fetch( + "/", + method="GET", + ) + + patch.assert_called() + patch.reset_mock() + + def test_ratelimit_application_on_one_client(self): + config = configuration.ConfigurationProvider.get_config() + ratelimlit_copy = copy.deepcopy(config._ratelimit) + ratelimlit_copy["read"]["capacity"] = 2 + ratelimlit_copy["read"]["consume"] = 2 + ratelimlit_copy["read"]["refill"] = 1 + + with unittest.mock.patch.dict("pinnwand.defensive.ConfigurationProvider._config._ratelimit", ratelimlit_copy): + with unittest.mock.patch.dict("pinnwand.defensive.ratelimit_area", clear=True): + response = self.fetch( + "/", + method="GET", + ) + + assert response.code == 200 + + response = self.fetch( + "/", + method="GET", + ) + + assert response.code == 429 + + def test_ratelimit_application_on_multiple_clients(self): + config = configuration.ConfigurationProvider.get_config() + ratelimlit_copy = copy.deepcopy(config._ratelimit) + area = "read" + ratelimlit_copy[area]["capacity"] = 10 + ratelimlit_copy[area]["consume"] = 7 + ratelimlit_copy[area]["refill"] = 1 + + ip1 = "192.168.15.32" + ip2 = "10.45.134.23" + + with unittest.mock.patch.dict("pinnwand.defensive.ConfigurationProvider._config._ratelimit", ratelimlit_copy): + with unittest.mock.patch.dict("pinnwand.defensive.ratelimit_area", clear=True): + assert defensive.should_be_ratelimited(ip1, area) is False + assert defensive.should_be_ratelimited(ip1, area) is True + assert defensive.should_be_ratelimited(ip2, area) is False + assert defensive.should_be_ratelimited(ip2, area) is True + assert defensive.should_be_ratelimited(ip2, area) is True + time.sleep(10) # Give it enough time to replenish + assert defensive.should_be_ratelimited(ip1, area) is False + assert defensive.should_be_ratelimited(ip2, area) is False + + def test_bucket_tokens_consumption(self): + config = configuration.ConfigurationProvider.get_config() + ratelimlit_copy = copy.deepcopy(config._ratelimit) + area = "read" + consumption = 7 + capacity = 10 + ratelimlit_copy[area]["capacity"] = capacity + ratelimlit_copy[area]["consume"] = consumption + ratelimlit_copy[area]["refill"] = 1 + + ip = "192.168.15.32" + with unittest.mock.patch.dict("pinnwand.defensive.ConfigurationProvider._config._ratelimit", ratelimlit_copy): + with unittest.mock.patch.dict("pinnwand.defensive.ratelimit_area", clear=True): + defensive.should_be_ratelimited(ip, area) + limiter = defensive.ratelimit_area[area] + tokens_remaining = limiter._storage.get_token_count(ip.encode("utf-8")) + assert tokens_remaining == capacity - consumption + diff --git a/test/integration/test_http_website.py b/test/integration/test_http_website.py index cbe19ea..630bd26 100644 --- a/test/integration/test_http_website.py +++ b/test/integration/test_http_website.py @@ -1,20 +1,22 @@ import urllib.parse - +import unittest.mock import tornado.testing import tornado.web - +import copy from pinnwand.configuration import Configuration, ConfigurationProvider +from pinnwand import app +from pinnwand.database import manager, utils as database_utils + configuration: Configuration = ConfigurationProvider.get_config() -configuration._ratelimit["read"]["capacity"] = 2**64 - 1 -configuration._ratelimit["create"]["capacity"] = 2**64 - 1 -configuration._ratelimit["delete"]["capacity"] = 2**64 - 1 -from pinnwand import app -from pinnwand.database import manager, utils as database_utils 
+ratelimit_copy = copy.deepcopy(configuration._ratelimit) +for area in ("read", "create", "delete"): + ratelimit_copy[area]["capacity"] = 2**64 - 1 [email protected](configuration._ratelimit, ratelimit_copy) class WebsiteTestCase(tornado.testing.AsyncHTTPTestCase): def setUp(self) -> None: super().setUp()
Fix consume ratelimit config

Configuration for rate limits lets you set the consume rate:

https://github.com/supakeen/pinnwand/blob/e737aba402548f83bc458c62ac6e10e69419f2b1/src/pinnwand/configuration.py#L12-L26

However, this configuration is not used; instead, the consume value is always 1:

https://github.com/supakeen/pinnwand/blob/e737aba402548f83bc458c62ac6e10e69419f2b1/src/pinnwand/defensive.py#L46
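A minimal sketch of the difference, using the same `token_bucket` calls that the patch above makes; the refill, capacity, and key values here are made up:

```python
import token_bucket

# Bucket refills 1 token per second and holds at most 10 tokens.
limiter = token_bucket.Limiter(1, 10, token_bucket.MemoryStorage())
key = b"192.0.2.1"  # per-client key, e.g. the remote IP

print(limiter.consume(key))     # drains 1 token -- the hard-coded behaviour
print(limiter.consume(key, 7))  # drains a configured `consume` amount instead
```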
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/integration/test_http_ratelimit.py::RateLimitTestCase::test_bucket_tokens_consumption", "test/integration/test_http_ratelimit.py::RateLimitTestCase::test_ratelimit_application_on_multiple_clients", "test/integration/test_http_ratelimit.py::RateLimitTestCase::test_ratelimit_application_on_one_client" ]
[ "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_get_expiries", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_get_lexers", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_empty_code", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_invalid_lexer", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_large_file", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_no_code", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_no_expiry", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_no_lexer", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_small_file", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_space_code", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_new_wrong_method", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_remove", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_return_filename", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_show", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_show_nonexistent", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_show_spaced", "test/integration/test_http_api.py::DeprecatedAPITestCase::test_api_show_web", "test/integration/test_http_api.py::APIv1TestCase::test_api_detail_many_files", "test/integration/test_http_api.py::APIv1TestCase::test_api_new", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_invalid_body", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_invalid_lexer", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_large_file", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_many_file", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_many_file_large", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_no_content", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_no_expiry", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_no_files", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_no_lexer", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_small_file", "test/integration/test_http_api.py::APIv1TestCase::test_api_new_wrong_method", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post_empty_raw", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post_no_expiry", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post_no_lexer", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post_no_raw", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post_nonexistent_expiry", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post_nonexistent_lexer", "test/integration/test_http_curl.py::CurlTestCase::test_curl_post_spaced_raw", "test/integration/test_http_curl.py::CurlTestCase::test_curl_raw", "test/integration/test_http_curl.py::CurlTestCase::test_curl_raw_spaced", "test/integration/test_http_curl.py::CurlTestCase::test_curl_remove", "test/integration/test_http_curl.py::CurlTestCase::test_curl_show", "test/integration/test_http_ratelimit.py::RateLimitTestCase::test_ratelimit_verification_on_endpoints", 
"test/integration/test_http_website.py::WebsiteTestCase::test_website_about", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_empty_filenames", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_empty_lexers", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_empty_raws", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_many", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_many_too_large", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_mismatched", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_multiple", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_nonexistent_expiry", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_nonexistent_lexer", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_nothing", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_only_filenames", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_only_lexers", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_only_raws", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_only_xsrf", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_raw_only_space", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_single", "test/integration/test_http_website.py::WebsiteTestCase::test_website_create_post_too_large", "test/integration/test_http_website.py::WebsiteTestCase::test_website_expiry", "test/integration/test_http_website.py::WebsiteTestCase::test_website_hex_nonexistent_paste", "test/integration/test_http_website.py::WebsiteTestCase::test_website_index", "test/integration/test_http_website.py::WebsiteTestCase::test_website_index_with_lexer", "test/integration/test_http_website.py::WebsiteTestCase::test_website_index_with_nonexistent_lexer", "test/integration/test_http_website.py::WebsiteTestCase::test_website_nonexistent_page", "test/integration/test_http_website.py::WebsiteTestCase::test_website_raw_nonexistent_paste", "test/integration/test_http_website.py::WebsiteTestCase::test_website_removal", "test/integration/test_http_website.py::WebsiteTestCase::test_website_show_nonexistent_paste", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_download", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_download_nonexistent_paste", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_hex", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_hex_nonexistent_paste", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_index_post", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_index_post_empty_code", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_index_post_no_code", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_index_post_no_expiry", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_index_post_no_lexer", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_index_post_nonexistent_expiry", 
"test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_index_post_nonexistent_lexer", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_logo", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_raw", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_remove", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_remove_nonexistent_paste", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_repaste", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_repaste_nonexistent_paste", "test/integration/test_http_website.py::DeprecatedWebsiteTestCase::test_website_show" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-03-20T10:07:41Z"
mit
svenevs__ci_exec-29
diff --git a/ci_exec/parsers/cmake_parser.py b/ci_exec/parsers/cmake_parser.py
index 5fa9cd8..267904f 100644
--- a/ci_exec/parsers/cmake_parser.py
+++ b/ci_exec/parsers/cmake_parser.py
@@ -261,6 +261,13 @@ class CMakeParser(argparse.ArgumentParser):
     __ https://cmake.org/cmake/help/latest/generator/Ninja.html
     """
 
+    ninja_multi_generator = {"Ninja Multi-Config"}
+    """
+    The `Ninja Multi-Config Generator`__.
+
+    __ https://cmake.org/cmake/help/latest/generator/Ninja%20Multi-Config.html
+    """
+
     visual_studio_generators = {
         "Visual Studio 9 2008",
         "Visual Studio 10 2010",
@@ -268,7 +275,8 @@
         "Visual Studio 12 2013",
         "Visual Studio 14 2015",
         "Visual Studio 15 2017",
-        "Visual Studio 16 2019"
+        "Visual Studio 16 2019",
+        "Visual Studio 17 2022"
     }
     """
     The `Visual Studio Generators`__.
@@ -286,7 +294,8 @@
     @classmethod
     def is_multi_config_generator(cls, generator: str) -> bool:
         """Whether or not string ``generator`` is a multi-config generator."""
-        return generator in (cls.visual_studio_generators | cls.other_generators)
+        return generator in (cls.visual_studio_generators | cls.other_generators |
+                             cls.ninja_multi_generator)
 
     @classmethod
     def is_single_config_generator(cls, generator: str) -> bool:
@@ -310,7 +319,8 @@
             help="Generator to use (CMake -G flag).",
             choices=sorted(
                 self.makefile_generators | self.ninja_generator |
-                self.visual_studio_generators | self.other_generators
+                self.ninja_multi_generator | self.visual_studio_generators |
+                self.other_generators
             )
         )
svenevs/ci_exec
1b513bfc2720334c7eda4a49519013fa644a8a9d
diff --git a/tests/parsers/cmake_parser.py b/tests/parsers/cmake_parser.py
index 3b60354..10308f0 100644
--- a/tests/parsers/cmake_parser.py
+++ b/tests/parsers/cmake_parser.py
@@ -53,7 +53,8 @@ def test_cmake_parser_is_x_config_generator():
         assert CMakeParser.is_single_config_generator(g)
         assert not CMakeParser.is_multi_config_generator(g)
 
-    for g in chain(CMakeParser.visual_studio_generators, CMakeParser.other_generators):
+    for g in chain(CMakeParser.visual_studio_generators, CMakeParser.other_generators,
+                   CMakeParser.ninja_multi_generator):
         assert not CMakeParser.is_single_config_generator(g)
         assert CMakeParser.is_multi_config_generator(g)
 
@@ -492,3 +493,9 @@ def test_cmake_parser_single_vs_multi_configure_build_args():
     ])
     assert "-DCMAKE_BUILD_TYPE=Debug" not in args.cmake_configure_args
     assert args.cmake_build_args == ["--config", "Debug"]
+
+    args = parser.parse_args([
+        "-G", "Ninja Multi-Config", "--build-type", "Debug"
+    ])
+    assert "-DCMAKE_BUILD_TYPE=Debug" not in args.cmake_configure_args
+    assert args.cmake_build_args == ["--config", "Debug"]
add ninja multi config generator

https://cmake.org/cmake/help/latest/generator/Ninja%20Multi-Config.html

Wait until CMake 3.17 comes out to make it easier to test :smirk:
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/parsers/cmake_parser.py::test_cmake_parser_is_x_config_generator", "tests/parsers/cmake_parser.py::test_cmake_parser_single_vs_multi_configure_build_args" ]
[ "tests/parsers/cmake_parser.py::test_cmake_parser_defaults", "tests/parsers/cmake_parser.py::test_cmake_parser_add_argument_failues", "tests/parsers/cmake_parser.py::test_cmake_parser_get_argument", "tests/parsers/cmake_parser.py::test_cmake_parser_remove", "tests/parsers/cmake_parser.py::test_cmake_parser_set_argument", "tests/parsers/cmake_parser.py::test_cmake_parser_extra_args", "tests/parsers/cmake_parser.py::test_cmake_parser_shared_or_static", "tests/parsers/cmake_parser.py::test_cmake_parser_parse_args_cmake_configure_args" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-08-21T01:14:14Z"
apache-2.0
swar__nba_api-286
diff --git a/src/nba_api/stats/library/data.py b/src/nba_api/stats/library/data.py index 9bfb241..daf40e2 100644 --- a/src/nba_api/stats/library/data.py +++ b/src/nba_api/stats/library/data.py @@ -4848,36 +4848,37 @@ team_index_year_founded = 3 team_index_city = 4 team_index_full_name = 5 team_index_state = 6 +team_index_championship_year = 7 teams = [ - [1610612737, 'ATL', 'Hawks', 1949, 'Atlanta', 'Atlanta Hawks', 'Atlanta'], - [1610612738, 'BOS', 'Celtics', 1946, 'Boston', 'Boston Celtics', 'Massachusetts'], - [1610612739, 'CLE', 'Cavaliers', 1970, 'Cleveland', 'Cleveland Cavaliers', 'Ohio'], - [1610612740, 'NOP', 'Pelicans', 2002, 'New Orleans', 'New Orleans Pelicans', 'Louisiana'], - [1610612741, 'CHI', 'Bulls', 1966, 'Chicago', 'Chicago Bulls', 'Illinois'], - [1610612742, 'DAL', 'Mavericks', 1980, 'Dallas', 'Dallas Mavericks', 'Texas'], - [1610612743, 'DEN', 'Nuggets', 1976, 'Denver', 'Denver Nuggets', 'Colorado'], - [1610612744, 'GSW', 'Warriors', 1946, 'Golden State', 'Golden State Warriors', 'California'], - [1610612745, 'HOU', 'Rockets', 1967, 'Houston', 'Houston Rockets', 'Texas'], - [1610612746, 'LAC', 'Clippers', 1970, 'Los Angeles', 'Los Angeles Clippers', 'California'], - [1610612747, 'LAL', 'Lakers', 1948, 'Los Angeles', 'Los Angeles Lakers', 'California'], - [1610612748, 'MIA', 'Heat', 1988, 'Miami', 'Miami Heat', 'Florida'], - [1610612749, 'MIL', 'Bucks', 1968, 'Milwaukee', 'Milwaukee Bucks', 'Wisconsin'], - [1610612750, 'MIN', 'Timberwolves', 1989, 'Minnesota', 'Minnesota Timberwolves', 'Minnesota'], - [1610612751, 'BKN', 'Nets', 1976, 'Brooklyn', 'Brooklyn Nets', 'New York'], - [1610612752, 'NYK', 'Knicks', 1946, 'New York', 'New York Knicks', 'New York'], - [1610612753, 'ORL', 'Magic', 1989, 'Orlando', 'Orlando Magic', 'Florida'], - [1610612754, 'IND', 'Pacers', 1976, 'Indiana', 'Indiana Pacers', 'Indiana'], - [1610612755, 'PHI', '76ers', 1949, 'Philadelphia', 'Philadelphia 76ers', 'Pennsylvania'], - [1610612756, 'PHX', 'Suns', 1968, 'Phoenix', 'Phoenix Suns', 'Arizona'], - [1610612757, 'POR', 'Trail Blazers', 1970, 'Portland', 'Portland Trail Blazers', 'Oregon'], - [1610612758, 'SAC', 'Kings', 1948, 'Sacramento', 'Sacramento Kings', 'California'], - [1610612759, 'SAS', 'Spurs', 1976, 'San Antonio', 'San Antonio Spurs', 'Texas'], - [1610612760, 'OKC', 'Thunder', 1967, 'Oklahoma City', 'Oklahoma City Thunder', 'Oklahoma'], - [1610612761, 'TOR', 'Raptors', 1995, 'Toronto', 'Toronto Raptors', 'Ontario'], - [1610612762, 'UTA', 'Jazz', 1974, 'Utah', 'Utah Jazz', 'Utah'], - [1610612763, 'MEM', 'Grizzlies', 1995, 'Memphis', 'Memphis Grizzlies', 'Tennessee'], - [1610612764, 'WAS', 'Wizards', 1961, 'Washington', 'Washington Wizards', 'District of Columbia'], - [1610612765, 'DET', 'Pistons', 1948, 'Detroit', 'Detroit Pistons', 'Michigan'], - [1610612766, 'CHA', 'Hornets', 1988, 'Charlotte', 'Charlotte Hornets', 'North Carolina'] + [1610612737, 'ATL', 'Hawks', 1949, 'Atlanta', 'Atlanta Hawks', 'Atlanta', [1958]], + [1610612738, 'BOS', 'Celtics', 1946, 'Boston', 'Boston Celtics', 'Massachusetts', [1957, 1959, 1960, 1961, 1962, 1963, 1964, 1965, 1966, 1968, 1969, 1974, 1976, 1981, 1984, 1986, 2008]], + [1610612739, 'CLE', 'Cavaliers', 1970, 'Cleveland', 'Cleveland Cavaliers', 'Ohio', [2016]], + [1610612740, 'NOP', 'Pelicans', 2002, 'New Orleans', 'New Orleans Pelicans', 'Louisiana', []], + [1610612741, 'CHI', 'Bulls', 1966, 'Chicago', 'Chicago Bulls', 'Illinois', [1991, 1992, 1993, 1996, 1997, 1998]], + [1610612742, 'DAL', 'Mavericks', 1980, 'Dallas', 'Dallas Mavericks', 'Texas', 
[2011]], + [1610612743, 'DEN', 'Nuggets', 1976, 'Denver', 'Denver Nuggets', 'Colorado', []], + [1610612744, 'GSW', 'Warriors', 1946, 'Golden State', 'Golden State Warriors', 'California', [1947, 1956, 1975, 2015, 2017, 2018, 2022]], + [1610612745, 'HOU', 'Rockets', 1967, 'Houston', 'Houston Rockets', 'Texas', [1994, 1995]], + [1610612746, 'LAC', 'Clippers', 1970, 'Los Angeles', 'Los Angeles Clippers', 'California', []], + [1610612747, 'LAL', 'Lakers', 1948, 'Los Angeles', 'Los Angeles Lakers', 'California', [1949, 1950, 1952, 1953, 1954, 1972, 1980, 1982, 1985, 1987, 1988, 2000, 2001, 2002, 2009, 2010, 2020]], + [1610612748, 'MIA', 'Heat', 1988, 'Miami', 'Miami Heat', 'Florida', [2006, 2012, 2013]], + [1610612749, 'MIL', 'Bucks', 1968, 'Milwaukee', 'Milwaukee Bucks', 'Wisconsin', [1971, 2021]], + [1610612750, 'MIN', 'Timberwolves', 1989, 'Minnesota', 'Minnesota Timberwolves', 'Minnesota', []], + [1610612751, 'BKN', 'Nets', 1976, 'Brooklyn', 'Brooklyn Nets', 'New York', []], + [1610612752, 'NYK', 'Knicks', 1946, 'New York', 'New York Knicks', 'New York', [1970, 1973]], + [1610612753, 'ORL', 'Magic', 1989, 'Orlando', 'Orlando Magic', 'Florida', []], + [1610612754, 'IND', 'Pacers', 1976, 'Indiana', 'Indiana Pacers', 'Indiana', []], + [1610612755, 'PHI', '76ers', 1949, 'Philadelphia', 'Philadelphia 76ers', 'Pennsylvania', [1955, 1967, 1983]], + [1610612756, 'PHX', 'Suns', 1968, 'Phoenix', 'Phoenix Suns', 'Arizona', []], + [1610612757, 'POR', 'Trail Blazers', 1970, 'Portland', 'Portland Trail Blazers', 'Oregon', [1977]], + [1610612758, 'SAC', 'Kings', 1948, 'Sacramento', 'Sacramento Kings', 'California', [1951]], + [1610612759, 'SAS', 'Spurs', 1976, 'San Antonio', 'San Antonio Spurs', 'Texas', [1999, 2003, 2005, 2007, 2014]], + [1610612760, 'OKC', 'Thunder', 1967, 'Oklahoma City', 'Oklahoma City Thunder', 'Oklahoma', [1979]], + [1610612761, 'TOR', 'Raptors', 1995, 'Toronto', 'Toronto Raptors', 'Ontario', [2019]], + [1610612762, 'UTA', 'Jazz', 1974, 'Utah', 'Utah Jazz', 'Utah', []], + [1610612763, 'MEM', 'Grizzlies', 1995, 'Memphis', 'Memphis Grizzlies', 'Tennessee', []], + [1610612764, 'WAS', 'Wizards', 1961, 'Washington', 'Washington Wizards', 'District of Columbia', [1978]], + [1610612765, 'DET', 'Pistons', 1948, 'Detroit', 'Detroit Pistons', 'Michigan', [1989, 1990, 2004]], + [1610612766, 'CHA', 'Hornets', 1988, 'Charlotte', 'Charlotte Hornets', 'North Carolina', []] ] diff --git a/tools/stats/static_players_update/template.py b/tools/stats/static_players_update/template.py index 448fe24..7c3f5f0 100644 --- a/tools/stats/static_players_update/template.py +++ b/tools/stats/static_players_update/template.py @@ -18,38 +18,39 @@ team_index_year_founded = 3 team_index_city = 4 team_index_full_name = 5 team_index_state = 6 +team_index_championship_year = 7 teams = [ - [1610612737, 'ATL', 'Hawks', 1949, 'Atlanta', 'Atlanta Hawks', 'Atlanta'], - [1610612738, 'BOS', 'Celtics', 1946, 'Boston', 'Boston Celtics', 'Massachusetts'], - [1610612739, 'CLE', 'Cavaliers', 1970, 'Cleveland', 'Cleveland Cavaliers', 'Ohio'], - [1610612740, 'NOP', 'Pelicans', 2002, 'New Orleans', 'New Orleans Pelicans', 'Louisiana'], - [1610612741, 'CHI', 'Bulls', 1966, 'Chicago', 'Chicago Bulls', 'Illinois'], - [1610612742, 'DAL', 'Mavericks', 1980, 'Dallas', 'Dallas Mavericks', 'Texas'], - [1610612743, 'DEN', 'Nuggets', 1976, 'Denver', 'Denver Nuggets', 'Colorado'], - [1610612744, 'GSW', 'Warriors', 1946, 'Golden State', 'Golden State Warriors', 'California'], - [1610612745, 'HOU', 'Rockets', 1967, 'Houston', 'Houston Rockets', 
'Texas'], - [1610612746, 'LAC', 'Clippers', 1970, 'Los Angeles', 'Los Angeles Clippers', 'California'], - [1610612747, 'LAL', 'Lakers', 1948, 'Los Angeles', 'Los Angeles Lakers', 'California'], - [1610612748, 'MIA', 'Heat', 1988, 'Miami', 'Miami Heat', 'Florida'], - [1610612749, 'MIL', 'Bucks', 1968, 'Milwaukee', 'Milwaukee Bucks', 'Wisconsin'], - [1610612750, 'MIN', 'Timberwolves', 1989, 'Minnesota', 'Minnesota Timberwolves', 'Minnesota'], - [1610612751, 'BKN', 'Nets', 1976, 'Brooklyn', 'Brooklyn Nets', 'New York'], - [1610612752, 'NYK', 'Knicks', 1946, 'New York', 'New York Knicks', 'New York'], - [1610612753, 'ORL', 'Magic', 1989, 'Orlando', 'Orlando Magic', 'Florida'], - [1610612754, 'IND', 'Pacers', 1976, 'Indiana', 'Indiana Pacers', 'Indiana'], - [1610612755, 'PHI', '76ers', 1949, 'Philadelphia', 'Philadelphia 76ers', 'Pennsylvania'], - [1610612756, 'PHX', 'Suns', 1968, 'Phoenix', 'Phoenix Suns', 'Arizona'], - [1610612757, 'POR', 'Trail Blazers', 1970, 'Portland', 'Portland Trail Blazers', 'Oregon'], - [1610612758, 'SAC', 'Kings', 1948, 'Sacramento', 'Sacramento Kings', 'California'], - [1610612759, 'SAS', 'Spurs', 1976, 'San Antonio', 'San Antonio Spurs', 'Texas'], - [1610612760, 'OKC', 'Thunder', 1967, 'Oklahoma City', 'Oklahoma City Thunder', 'Oklahoma'], - [1610612761, 'TOR', 'Raptors', 1995, 'Toronto', 'Toronto Raptors', 'Ontario'], - [1610612762, 'UTA', 'Jazz', 1974, 'Utah', 'Utah Jazz', 'Utah'], - [1610612763, 'MEM', 'Grizzlies', 1995, 'Memphis', 'Memphis Grizzlies', 'Tennessee'], - [1610612764, 'WAS', 'Wizards', 1961, 'Washington', 'Washington Wizards', 'District of Columbia'], - [1610612765, 'DET', 'Pistons', 1948, 'Detroit', 'Detroit Pistons', 'Michigan'], - [1610612766, 'CHA', 'Hornets', 1988, 'Charlotte', 'Charlotte Hornets', 'North Carolina'] + [1610612737, 'ATL', 'Hawks', 1949, 'Atlanta', 'Atlanta Hawks', 'Atlanta', [1958]], + [1610612738, 'BOS', 'Celtics', 1946, 'Boston', 'Boston Celtics', 'Massachusetts', [1957, 1959, 1960, 1961, 1962, 1963, 1964, 1965, 1966, 1968, 1969, 1974, 1976, 1981, 1984, 1986, 2008]], + [1610612739, 'CLE', 'Cavaliers', 1970, 'Cleveland', 'Cleveland Cavaliers', 'Ohio', [2016]], + [1610612740, 'NOP', 'Pelicans', 2002, 'New Orleans', 'New Orleans Pelicans', 'Louisiana', []], + [1610612741, 'CHI', 'Bulls', 1966, 'Chicago', 'Chicago Bulls', 'Illinois', [1991, 1992, 1993, 1996, 1997, 1998]], + [1610612742, 'DAL', 'Mavericks', 1980, 'Dallas', 'Dallas Mavericks', 'Texas', [2011]], + [1610612743, 'DEN', 'Nuggets', 1976, 'Denver', 'Denver Nuggets', 'Colorado', []], + [1610612744, 'GSW', 'Warriors', 1946, 'Golden State', 'Golden State Warriors', 'California', [1947, 1956, 1975, 2015, 2017, 2018, 2022]], + [1610612745, 'HOU', 'Rockets', 1967, 'Houston', 'Houston Rockets', 'Texas', [1994, 1995]], + [1610612746, 'LAC', 'Clippers', 1970, 'Los Angeles', 'Los Angeles Clippers', 'California', []], + [1610612747, 'LAL', 'Lakers', 1948, 'Los Angeles', 'Los Angeles Lakers', 'California', [1949, 1950, 1952, 1953, 1954, 1972, 1980, 1982, 1985, 1987, 1988, 2000, 2001, 2002, 2009, 2010, 2020]], + [1610612748, 'MIA', 'Heat', 1988, 'Miami', 'Miami Heat', 'Florida', [2006, 2012, 2013]], + [1610612749, 'MIL', 'Bucks', 1968, 'Milwaukee', 'Milwaukee Bucks', 'Wisconsin', [1971, 2021]], + [1610612750, 'MIN', 'Timberwolves', 1989, 'Minnesota', 'Minnesota Timberwolves', 'Minnesota', []], + [1610612751, 'BKN', 'Nets', 1976, 'Brooklyn', 'Brooklyn Nets', 'New York', []], + [1610612752, 'NYK', 'Knicks', 1946, 'New York', 'New York Knicks', 'New York', [1970, 1973]], + [1610612753, 
'ORL', 'Magic', 1989, 'Orlando', 'Orlando Magic', 'Florida', []], + [1610612754, 'IND', 'Pacers', 1976, 'Indiana', 'Indiana Pacers', 'Indiana', []], + [1610612755, 'PHI', '76ers', 1949, 'Philadelphia', 'Philadelphia 76ers', 'Pennsylvania', [1955, 1967, 1983]], + [1610612756, 'PHX', 'Suns', 1968, 'Phoenix', 'Phoenix Suns', 'Arizona', []], + [1610612757, 'POR', 'Trail Blazers', 1970, 'Portland', 'Portland Trail Blazers', 'Oregon', [1977]], + [1610612758, 'SAC', 'Kings', 1948, 'Sacramento', 'Sacramento Kings', 'California', [1951]], + [1610612759, 'SAS', 'Spurs', 1976, 'San Antonio', 'San Antonio Spurs', 'Texas', [1999, 2003, 2005, 2007, 2014]], + [1610612760, 'OKC', 'Thunder', 1967, 'Oklahoma City', 'Oklahoma City Thunder', 'Oklahoma', [1979]], + [1610612761, 'TOR', 'Raptors', 1995, 'Toronto', 'Toronto Raptors', 'Ontario', [2019]], + [1610612762, 'UTA', 'Jazz', 1974, 'Utah', 'Utah Jazz', 'Utah', []], + [1610612763, 'MEM', 'Grizzlies', 1995, 'Memphis', 'Memphis Grizzlies', 'Tennessee', []], + [1610612764, 'WAS', 'Wizards', 1961, 'Washington', 'Washington Wizards', 'District of Columbia', [1978]], + [1610612765, 'DET', 'Pistons', 1948, 'Detroit', 'Detroit Pistons', 'Michigan', [1989, 1990, 2004]], + [1610612766, 'CHA', 'Hornets', 1988, 'Charlotte', 'Charlotte Hornets', 'North Carolina', []] ] '''
swar/nba_api
cd721a79a50b8c18e5846122e08a23666a4e1a08
diff --git a/tests/unit/test_static_data.py b/tests/unit/test_static_data.py new file mode 100644 index 0000000..de88a88 --- /dev/null +++ b/tests/unit/test_static_data.py @@ -0,0 +1,5 @@ +from nba_api.stats.static import teams + + +def test_get_request_url(): + assert len(teams.teams) == 30
[Bug]: cannot import name 'team_index_championship_year' from 'nba_api.stats.library.data'

### NBA API Version

V1.1.12

### Issue

Working on Linux Ubuntu 22.04 LTS. Installed nba_api with `!pip install` in a Jupyter notebook cell. Getting an ImportError when I try to import `teams` using `from nba_api.stats.static import teams`.

### Code

```
In [ 66 ]: !pip install nba_api
```

```
In [ 67 ]: from nba_api.stats.static import teams
```
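The patch above adds a `team_index_championship_year` column (index 7) to the generated static team data. A minimal sketch of how the new index constant could be used once the data module is regenerated; it assumes the `teams.teams` list checked by the added test and that the index constants are importable from `nba_api.stats.library.data`, as the report implies:

```python
from nba_api.stats.static import teams
from nba_api.stats.library.data import (
    team_index_full_name,
    team_index_championship_year,
)

# Each static team row is a plain list; the index constants name its columns.
assert len(teams.teams) == 30
for team in teams.teams:
    print(team[team_index_full_name], team[team_index_championship_year])
```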
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/unit/test_static_data.py::test_get_request_url" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2022-10-16T00:18:06Z"
mit
swaroopch__edn_format-36
diff --git a/edn_format/edn_lex.py b/edn_format/edn_lex.py index 42fdea7..e3b729e 100644 --- a/edn_format/edn_lex.py +++ b/edn_format/edn_lex.py @@ -193,13 +193,13 @@ def t_BOOLEAN(t): def t_FLOAT(t): - r"""[+-]?\d+\.\d+[M]?([eE][+-]?\d+)?""" + r"""[+-]?\d+(?:\.\d+([eE][+-]?\d+)?|([eE][+-]?\d+))M?""" e_value = 0 if 'e' in t.value or 'E' in t.value: - matches = re.search('[eE]([+-]?\d+)$', t.value) + matches = re.search('[eE]([+-]?\d+)M?$', t.value) if matches is None: raise SyntaxError('Invalid float : {}'.format(t.value)) - e_value = int(matches.group()[1:]) + e_value = int(matches.group(1)) if t.value.endswith('M'): t.value = decimal.Decimal(t.value[:-1]) * pow(1, e_value) else:
swaroopch/edn_format
0616eb18781cd9d9394683b806b0b3033b47f371
diff --git a/tests.py b/tests.py index c77c273..437c2fd 100644 --- a/tests.py +++ b/tests.py @@ -143,7 +143,8 @@ class EdnTest(unittest.TestCase): ["+123N", "123"], ["123.2", "123.2"], ["+32.23M", "32.23M"], - ["3.23e10", "32300000000.0"] + ["3.23e10", "32300000000.0"], + ["3e10", "30000000000.0"], ] for literal in EDN_LITERALS: @@ -195,6 +196,8 @@ class EdnTest(unittest.TestCase): "32.23M", "-32.23M", "3.23e-10", + "3e+20", + "3E+20M", '["abc"]', '[1]', '[1 "abc"]',
Issue parsing big numbers, specifically `45e+43` and `45.4e+43M`.
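A minimal sketch (not part of the library) checking the amended `FLOAT` token from the patch against the literals in the report; the regex now accepts a fraction part, an exponent, or both, with an optional trailing `M` marking a decimal literal:

```python
import re

# FLOAT token regex exactly as amended by the patch.
FLOAT = r"[+-]?\d+(?:\.\d+([eE][+-]?\d+)?|([eE][+-]?\d+))M?"

# All of these should now lex as floats, including exponents without a
# fraction part and the M-suffixed decimal form.
for literal in ("45e+43", "45.4e+43M", "3.23e10", "3e10", "3E+20M"):
    assert re.fullmatch(FLOAT, literal), literal
```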
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests.py::EdnTest::test_round_trip_conversion", "tests.py::EdnTest::test_round_trip_same" ]
[ "tests.py::ConsoleTest::test_dumping", "tests.py::EdnTest::test_dump", "tests.py::EdnTest::test_keyword_keys", "tests.py::EdnTest::test_lexer", "tests.py::EdnTest::test_parser", "tests.py::EdnTest::test_proper_unicode_escape", "tests.py::EdnTest::test_round_trip_inst_short", "tests.py::EdnTest::test_round_trip_sets", "tests.py::EdnInstanceTest::test_equality", "tests.py::EdnInstanceTest::test_hashing" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2017-02-14T10:07:46Z"
apache-2.0
swaroopch__edn_format-43
diff --git a/edn_format/edn_lex.py b/edn_format/edn_lex.py index ac0a3af..fc2026e 100644 --- a/edn_format/edn_lex.py +++ b/edn_format/edn_lex.py @@ -102,7 +102,8 @@ tokens = ('WHITESPACE', 'MAP_START', 'SET_START', 'MAP_OR_SET_END', - 'TAG') + 'TAG', + 'DISCARD_TAG') PARTS = {} PARTS["non_nums"] = r"\w.*+!\-_?$%&=:#<>@" @@ -138,7 +139,7 @@ KEYWORD = (":" "[{all}]+" ")").format(**PARTS) TAG = (r"\#" - r"\w" + r"[a-zA-Z]" # https://github.com/edn-format/edn/issues/30#issuecomment-8540641 "(" "[{all}]*" r"\/" @@ -147,6 +148,8 @@ TAG = (r"\#" "[{all}]*" ")").format(**PARTS) +DISCARD_TAG = r"\#\_" + t_VECTOR_START = r'\[' t_VECTOR_END = r'\]' t_LIST_START = r'\(' @@ -228,9 +231,10 @@ def t_COMMENT(t): pass # ignore -def t_DISCARD(t): - r'\#_\S+\b' - pass # ignore [email protected](DISCARD_TAG) +def t_DISCARD_TAG(t): + t.value = t.value[1:] + return t @ply.lex.TOKEN(TAG) diff --git a/edn_format/edn_parse.py b/edn_format/edn_parse.py index c2be09d..329584e 100644 --- a/edn_format/edn_parse.py +++ b/edn_format/edn_parse.py @@ -56,41 +56,21 @@ def p_term_leaf(p): p[0] = p[1] -def p_empty_vector(p): - """vector : VECTOR_START VECTOR_END""" - p[0] = ImmutableList([]) - - def p_vector(p): """vector : VECTOR_START expressions VECTOR_END""" p[0] = ImmutableList(p[2]) -def p_empty_list(p): - """list : LIST_START LIST_END""" - p[0] = tuple() - - def p_list(p): """list : LIST_START expressions LIST_END""" p[0] = tuple(p[2]) -def p_empty_set(p): - """set : SET_START MAP_OR_SET_END""" - p[0] = frozenset() - - def p_set(p): """set : SET_START expressions MAP_OR_SET_END""" p[0] = frozenset(p[2]) -def p_empty_map(p): - """map : MAP_START MAP_OR_SET_END""" - p[0] = ImmutableDict({}) - - def p_map(p): """map : MAP_START expressions MAP_OR_SET_END""" terms = p[2] @@ -100,14 +80,20 @@ def p_map(p): p[0] = ImmutableDict(dict([terms[i:i + 2] for i in range(0, len(terms), 2)])) -def p_expressions_expressions_expression(p): - """expressions : expressions expression""" - p[0] = p[1] + [p[2]] +def p_discarded_expressions(p): + """discarded_expressions : DISCARD_TAG expression discarded_expressions + |""" + p[0] = [] + +def p_expressions_expression_expressions(p): + """expressions : expression expressions""" + p[0] = [p[1]] + p[2] -def p_expressions_expression(p): - """expressions : expression""" - p[0] = [p[1]] + +def p_expressions_empty(p): + """expressions : discarded_expressions""" + p[0] = [] def p_expression(p): @@ -119,6 +105,11 @@ def p_expression(p): p[0] = p[1] +def p_expression_discard_expression_expression(p): + """expression : DISCARD_TAG expression expression""" + p[0] = p[3] + + def p_expression_tagged_element(p): """expression : TAG expression""" tag = p[1] @@ -144,9 +135,13 @@ def p_expression_tagged_element(p): p[0] = output +def eof(): + raise EDNDecodeError('EOF Reached') + + def p_error(p): if p is None: - raise EDNDecodeError('EOF Reached') + eof() else: raise EDNDecodeError(p)
swaroopch/edn_format
7a3865c6d7ddc1a8d2d8ecb4114f11ed8b96fda8
diff --git a/tests.py b/tests.py index 8f7fcc1..29562d5 100644 --- a/tests.py +++ b/tests.py @@ -133,6 +133,12 @@ class EdnTest(unittest.TestCase): def check_roundtrip(self, data_input, **kw): self.assertEqual(data_input, loads(dumps(data_input, **kw))) + def check_eof(self, data_input, **kw): + with self.assertRaises(EDNDecodeError) as ctx: + loads(data_input, **kw) + + self.assertEqual('EOF Reached', str(ctx.exception)) + def test_dump(self): self.check_roundtrip({1, 2, 3}) self.check_roundtrip({1, 2, 3}, sort_sets=True) @@ -339,6 +345,57 @@ class EdnTest(unittest.TestCase): set(seq), sort_sets=True) + def test_discard(self): + for expected, edn_data in ( + ('[x]', '[x #_ z]'), + ('[z]', '[#_ x z]'), + ('[x z]', '[x #_ y z]'), + ('{1 4}', '{1 #_ 2 #_ 3 4}'), + ('[1 2]', '[1 #_ [ #_ [ #_ [ #_ [ #_ 42 ] ] ] ] 2 ]'), + ('[1 2 11]', '[1 2 #_ #_ #_ #_ 4 5 6 #_ 7 #_ #_ 8 9 10 11]'), + ('()', '(#_(((((((1))))))))'), + ('[6]', '[#_ #_ #_ #_ #_ 1 2 3 4 5 6]'), + ('[4]', '[#_ #_ 1 #_ 2 3 4]'), + ('{:a 1}', '{:a #_:b 1}'), + ('[42]', '[42 #_ {:a [1 2 3 4] true false 1 #inst "2017"}]'), + ('#{1}', '#{1 #_foo}'), + ('"#_ foo"', '"#_ foo"'), + ('["#" _]', '[\#_]'), + ('[_]', '[#_\#_]'), + ('[1]', '[1 #_\n\n42]'), + ('{}', '{#_ 1}'), + ): + self.assertEqual(expected, dumps(loads(edn_data)), edn_data) + + def test_discard_syntax_errors(self): + for edn_data in ('#_', '#_ #_ 1', '#inst #_ 2017', '[#_]'): + with self.assertRaises(EDNDecodeError): + loads(edn_data) + + def test_discard_all(self): + for edn_data in ( + '42', '-1', 'nil', 'true', 'false', '"foo"', '\\space', '\\a', + ':foo', ':foo/bar', '[]', '{}', '#{}', '()', '(a)', '(a b)', + '[a [[[b] c]] 2]', '#inst "2017"', + ): + self.assertEqual([1], loads('[1 #_ {}]'.format(edn_data)), edn_data) + self.assertEqual([1], loads('[#_ {} 1]'.format(edn_data)), edn_data) + + self.check_eof('#_ {}'.format(edn_data)) + + for coll in ('[%s]', '(%s)', '{%s}', '#{%s}'): + expected = coll % "" + edn_data = coll % '#_ {}'.format(edn_data) + self.assertEqual(expected, dumps(loads(edn_data)), edn_data) + + def test_chained_discards(self): + for expected, edn_data in ( + ('[]', '[#_ 1 #_ 2 #_ 3]'), + ('[]', '[#_ #_ 1 2 #_ 3]'), + ('[]', '[#_ #_ #_ 1 2 3]'), + ): + self.assertEqual(expected, dumps(loads(edn_data)), edn_data) + class EdnInstanceTest(unittest.TestCase): def test_hashing(self):
Not handling discard as expected. `[x #_ y z]` should yield `[Symbol(x), Symbol(z)]`, but instead it fails with: "Don't know how to handle tag _"
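Expected behaviour after the fix, sketched with the package-level `loads`/`dumps` helpers the test suite uses:

```python
from edn_format import loads, dumps

# #_ discards the next form, and discards can chain: each #_ consumes
# the following expression (which may itself be a discard).
assert dumps(loads('[x #_ y z]')) == '[x z]'
assert dumps(loads('[#_ #_ 1 2 #_ 3]')) == '[]'
```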
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests.py::EdnTest::test_chained_discards", "tests.py::EdnTest::test_discard", "tests.py::EdnTest::test_discard_all", "tests.py::EdnTest::test_discard_syntax_errors" ]
[ "tests.py::ConsoleTest::test_dumping", "tests.py::EdnTest::test_chars", "tests.py::EdnTest::test_dump", "tests.py::EdnTest::test_exceptions", "tests.py::EdnTest::test_keyword_keys", "tests.py::EdnTest::test_lexer", "tests.py::EdnTest::test_parser", "tests.py::EdnTest::test_proper_unicode_escape", "tests.py::EdnTest::test_round_trip_conversion", "tests.py::EdnTest::test_round_trip_inst_short", "tests.py::EdnTest::test_round_trip_same", "tests.py::EdnTest::test_round_trip_sets", "tests.py::EdnTest::test_sort_keys", "tests.py::EdnTest::test_sort_sets", "tests.py::EdnInstanceTest::test_equality", "tests.py::EdnInstanceTest::test_hashing", "tests.py::ImmutableListTest::test_list" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2018-08-04T10:41:01Z"
apache-2.0
swen128__twitter-text-python-7
diff --git a/twitter_text/parse_tweet.py b/twitter_text/parse_tweet.py index ffa4f3e..db0d7cb 100644 --- a/twitter_text/parse_tweet.py +++ b/twitter_text/parse_tweet.py @@ -1,3 +1,4 @@ +import re import unicodedata from math import floor from typing import List, Dict @@ -25,6 +26,10 @@ class ParsedResult: return attr.asdict(self) +def convert_line_ending(string, to="\n"): + return re.sub(r'\r\n|\r|\n', to, string) + + def parse_tweet(text: str, options: dict = config['defaults']) -> ParsedResult: """ Parse a Twitter text according to https://developer.twitter.com/en/docs/developer-utilities/twitter-text @@ -106,7 +111,7 @@ def parse_tweet(text: str, options: dict = config['defaults']) -> ParsedResult: emoji_parsing_enabled = options['emoji_parsing_enabled'] max_weighted_tweet_length = options['max_weighted_tweet_length'] - normalized_text = unicodedata.normalize('NFC', text) + normalized_text = convert_line_ending(unicodedata.normalize('NFC', text)) url_entities_map = transform_entities_to_hash(extract_urls_with_indices(normalized_text)) emoji_entities_map = transform_entities_to_hash(extract_emojis_with_indices(normalized_text))
swen128/twitter-text-python
ff20ed9def7773695e875257cf59783aa5d20001
diff --git a/tests/cases/added.yml b/tests/cases/added.yml index b20b36d..1391e2f 100644 --- a/tests/cases/added.yml +++ b/tests/cases/added.yml @@ -6,6 +6,16 @@ tests: - url: "https://t.co/slug" indices: [0, 17] ParseTweet: + - description: "CRLF character" + text: "a\r\nb" + expected: + weightedLength: 3 + valid: true + permillage: 10 + displayRangeStart: 0 + displayRangeEnd: 3 + validRangeStart: 0 + validRangeEnd: 3 - description: "A URL containing emojis" text: "https://😷😷😷😷😷😷😷😷😷😷😷😷😷😷😷😷😷😷😷😷😷😷😷😷😷😷.jp" expected:
[Bug] "\r\n" を2文字としてカウントするバグ Tweet API を使用していて、ツイッターをブラウザで開いてツイートできる文字数なのに、parse_tweet関数を使用すると文字数オーバーとなる奇妙な現象に遭遇・・・。 おそらく"\r" を改行として取得できていません。 以下のテストコードを見て下さい。 ``` from twitter_text import parse_tweet text1 = '''abc def ghi''' print(text1) print(parse_tweet(text1)) print() text2 = 'abc\ndef\nghi' print(text2) print(parse_tweet(text2)) print() text3 = 'abc\rdef\rghi' print(text3) print(parse_tweet(text3)) print() text4 = 'abc\r\ndef\r\nghi' print(text4) print(parse_tweet(text4)) ``` 結果は次のようになります。 ``` abc def ghi ParsedResult(valid=True, weightedLength=11, permillage=39, validRangeStart=0, validRangeEnd=10, displayRangeStart=0, displayRangeEnd=10) abc def ghi ParsedResult(valid=True, weightedLength=11, permillage=39, validRangeStart=0, validRangeEnd=10, displayRangeStart=0, displayRangeEnd=10) ghi ParsedResult(valid=True, weightedLength=11, permillage=39, validRangeStart=0, validRangeEnd=10, displayRangeStart=0, displayRangeEnd=10) abc def ghi ParsedResult(valid=True, weightedLength=13, permillage=46, validRangeStart=0, validRangeEnd=12, displayRangeStart=0, displayRangeEnd=12) ``` 改行文字を1文字として取得しているならば、 "weightedLength=11"が正しく、それ以外は間違っています。 修正して下さい。
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_added.py::test_added_parse_tweet[CRLF" ]
[ "tests/test_conformance.py::test_extract_urls[DO", "tests/test_conformance.py::test_extract_urls[Extract", "tests/test_conformance.py::test_tlds_country[bb", "tests/test_conformance.py::test_extract_urls_with_directional_markers[Extract", "tests/test_conformance.py::test_tlds_country[cl", "tests/test_conformance.py::test_tlds_country[uy", "tests/test_conformance.py::test_tlds_country[ee", "tests/test_conformance.py::test_tlds_country[ru", "tests/test_conformance.py::test_tlds_country[ai", "tests/test_conformance.py::test_tlds_country[nz", "tests/test_conformance.py::test_tlds_country[nf", "tests/test_conformance.py::test_tlds_country[la", "tests/test_conformance.py::test_tlds_country[cx", "tests/test_conformance.py::test_tlds_country[\\u0639\\u0645\\u0627\\u0646", "tests/test_conformance.py::test_extract_urls_with_indices[Extract", "tests/test_conformance.py::test_tlds_country[mz", "tests/test_conformance.py::test_tlds_country[bv", "tests/test_conformance.py::test_tlds_country[fo", "tests/test_conformance.py::test_tlds_country[pe", "tests/test_conformance.py::test_tlds_country[mt", "tests/test_conformance.py::test_tlds_country[ws", "tests/test_conformance.py::test_tlds_country[ca", "tests/test_conformance.py::test_tlds_country[gy", "tests/test_conformance.py::test_tlds_country[fk", "tests/test_conformance.py::test_tlds_country[lb", "tests/test_conformance.py::test_tlds_country[mr", "tests/test_conformance.py::test_tlds_country[ua", "tests/test_conformance.py::test_tlds_country[cc", "tests/test_conformance.py::test_tlds_country[ne", "tests/test_conformance.py::test_tlds_country[cr", "tests/test_conformance.py::test_tlds_country[\\u0633\\u0648\\u062f\\u0627\\u0646", "tests/test_conformance.py::test_tlds_country[\\u0b87\\u0bb2\\u0b99\\u0bcd\\u0b95\\u0bc8", "tests/test_conformance.py::test_validate_weighted_tweets_with_discounted_emoji_counter_test[Unicode", "tests/test_conformance.py::test_tlds_country[tg", "tests/test_conformance.py::test_tlds_country[py", "tests/test_conformance.py::test_tlds_country[ly", "tests/test_conformance.py::test_tlds_country[\\u067e\\u0627\\u06a9\\u0633\\u062a\\u0627\\u0646", "tests/test_conformance.py::test_tlds_country[fj", "tests/test_conformance.py::test_tlds_country[nl", "tests/test_conformance.py::test_tlds_country[br", "tests/test_conformance.py::test_tlds_country[\\u0440\\u0444", "tests/test_conformance.py::test_tlds_country[sn", "tests/test_conformance.py::test_tlds_country[hu", "tests/test_conformance.py::test_tlds_country[za", "tests/test_conformance.py::test_validate_weighted_tweets_with_discounted_emoji_counter_test[10", "tests/test_conformance.py::test_tlds_country[ht", "tests/test_conformance.py::test_tlds_country[bj", "tests/test_conformance.py::test_tlds_country[kp", "tests/test_conformance.py::test_tlds_country[ug", "tests/test_conformance.py::test_tlds_country[al", "tests/test_conformance.py::test_tlds_country[gp", "tests/test_conformance.py::test_tlds_country[gf", "tests/test_conformance.py::test_validate_unicode_directional_marker_counter_test[Tweet", "tests/test_conformance.py::test_tlds_country[\\u4e2d\\u570b", "tests/test_conformance.py::test_tlds_country[tv", "tests/test_conformance.py::test_tlds_country[\\u0d2d\\u0d3e\\u0d30\\u0d24\\u0d02", "tests/test_conformance.py::test_tlds_country[bg", "tests/test_conformance.py::test_tlds_country[bw", "tests/test_conformance.py::test_tlds_country[re", "tests/test_conformance.py::test_tlds_country[bn", 
"tests/test_conformance.py::test_tlds_country[\\u0627\\u0644\\u0633\\u0639\\u0648\\u062f\\u064a\\u0629", "tests/test_conformance.py::test_tlds_country[ac", "tests/test_conformance.py::test_tlds_country[kn", "tests/test_conformance.py::test_tlds_country[vc", "tests/test_conformance.py::test_validate_weighted_tweets_with_discounted_emoji_counter_test[Count", "tests/test_conformance.py::test_tlds_country[\\ud55c\\uad6d", "tests/test_conformance.py::test_tlds_country[gi", "tests/test_conformance.py::test_tlds_country[cv", "tests/test_conformance.py::test_tlds_country[ni", "tests/test_conformance.py::test_tlds_country[pf", "tests/test_conformance.py::test_tlds_country[ki", "tests/test_conformance.py::test_tlds_country[ms", "tests/test_conformance.py::test_tlds_country[\\u0431\\u0433", "tests/test_conformance.py::test_tlds_country[km", "tests/test_conformance.py::test_tlds_country[np", "tests/test_conformance.py::test_tlds_country[\\u0b87\\u0ba8\\u0bcd\\u0ba4\\u0bbf\\u0baf\\u0bbe", "tests/test_conformance.py::test_tlds_country[ar", "tests/test_conformance.py::test_tlds_country[sm", "tests/test_conformance.py::test_tlds_country[vg", "tests/test_conformance.py::test_tlds_country[yt", "tests/test_conformance.py::test_extract_tco_urls_with_params[Extract", "tests/test_conformance.py::test_tlds_country[cy", "tests/test_conformance.py::test_tlds_country[eh", "tests/test_conformance.py::test_tlds_country[ng", "tests/test_conformance.py::test_tlds_country[sr", "tests/test_conformance.py::test_tlds_country[mh", "tests/test_conformance.py::test_tlds_country[lc", "tests/test_conformance.py::test_tlds_country[\\u6fb3\\u9580", "tests/test_conformance.py::test_tlds_country[na", "tests/test_conformance.py::test_tlds_country[me", "tests/test_conformance.py::test_tlds_country[pw", "tests/test_conformance.py::test_tlds_country[\\u043c\\u043a\\u0434", "tests/test_conformance.py::test_tlds_country[\\u049b\\u0430\\u0437", "tests/test_conformance.py::test_tlds_country[\\u0e44\\u0e17\\u0e22", "tests/test_conformance.py::test_tlds_country[ag", "tests/test_conformance.py::test_tlds_country[tf", "tests/test_conformance.py::test_tlds_country[gw", "tests/test_conformance.py::test_tlds_country[mf", "tests/test_conformance.py::test_tlds_country[sy", "tests/test_conformance.py::test_validate_weighted_tweets_with_discounted_emoji_counter_test[Handle", "tests/test_conformance.py::test_tlds_country[au", "tests/test_conformance.py::test_tlds_country[pg", "tests/test_conformance.py::test_tlds_country[\\u09ac\\u09be\\u0982\\u09b2\\u09be", "tests/test_conformance.py::test_tlds_country[\\u043c\\u043e\\u043d", "tests/test_conformance.py::test_tlds_country[kh", "tests/test_conformance.py::test_tlds_country[gn", "tests/test_conformance.py::test_tlds_country[no", "tests/test_conformance.py::test_tlds_country[de", "tests/test_conformance.py::test_tlds_country[tm", "tests/test_conformance.py::test_validate_weighted_tweets_with_discounted_emoji_counter_test[282", "tests/test_conformance.py::test_tlds_country[mu", "tests/test_conformance.py::test_tlds_country[bo", "tests/test_conformance.py::test_tlds_country[\\u092d\\u093e\\u0930\\u0924", "tests/test_conformance.py::test_tlds_country[\\u0441\\u0440\\u0431", "tests/test_conformance.py::test_tlds_country[bq", "tests/test_conformance.py::test_tlds_country[by", "tests/test_conformance.py::test_tlds_country[iq", "tests/test_conformance.py::test_tlds_country[aq", "tests/test_conformance.py::test_validate_weighted_tweets_with_discounted_emoji_counter_test[Do", 
"tests/test_conformance.py::test_tlds_country[\\u0dbd\\u0d82\\u0d9a\\u0dcf", "tests/test_conformance.py::test_tlds_country[eg", "tests/test_conformance.py::test_tlds_country[be", "tests/test_conformance.py::test_tlds_country[io", "tests/test_conformance.py::test_tlds_country[mw", "tests/test_conformance.py::test_tlds_country[pl", "tests/test_conformance.py::test_tlds_country[in", "tests/test_conformance.py::test_tlds_country[nu", "tests/test_conformance.py::test_tlds_country[\\u0b9a\\u0bbf\\u0b99\\u0bcd\\u0b95\\u0baa\\u0bcd\\u0baa\\u0bc2\\u0bb0\\u0bcd", "tests/test_conformance.py::test_tlds_country[mm", "tests/test_conformance.py::test_tlds_country[cd", "tests/test_conformance.py::test_tlds_country[ad", "tests/test_conformance.py::test_tlds_country[sz", "tests/test_conformance.py::test_tlds_country[li", "tests/test_conformance.py::test_tlds_country[\\u9999\\u6e2f", "tests/test_conformance.py::test_tlds_country[dm", "tests/test_conformance.py::test_tlds_country[\\u0633\\u0648\\u0631\\u064a\\u0629", "tests/test_conformance.py::test_tlds_country[cf", "tests/test_conformance.py::test_tlds_country[ps", "tests/test_conformance.py::test_tlds_country[\\u0b2d\\u0b3e\\u0b30\\u0b24", "tests/test_conformance.py::test_tlds_country[md", "tests/test_conformance.py::test_tlds_country[it", "tests/test_conformance.py::test_tlds_country[sb", "tests/test_conformance.py::test_tlds_country[\\u0641\\u0644\\u0633\\u0637\\u064a\\u0646", "tests/test_conformance.py::test_tlds_country[sh", "tests/test_conformance.py::test_tlds_country[\\u0627\\u0644\\u0627\\u0631\\u062f\\u0646", "tests/test_conformance.py::test_tlds_country[rs", "tests/test_conformance.py::test_tlds_country[sv", "tests/test_conformance.py::test_tlds_country[vu", "tests/test_conformance.py::test_tlds_country[at", "tests/test_conformance.py::test_tlds_country[tw", "tests/test_conformance.py::test_tlds_country[jm", "tests/test_conformance.py::test_tlds_country[mn", "tests/test_conformance.py::test_tlds_country[tt", "tests/test_conformance.py::test_tlds_country[tc", "tests/test_conformance.py::test_tlds_country[\\u0435\\u044e", "tests/test_conformance.py::test_tlds_country[va", "tests/test_conformance.py::test_tlds_country[ml", "tests/test_conformance.py::test_tlds_country[mc", "tests/test_conformance.py::test_tlds_country[\\u0680\\u0627\\u0631\\u062a", "tests/test_conformance.py::test_tlds_country[rw", "tests/test_conformance.py::test_validate_unicode_directional_marker_counter_test[Handle", "tests/test_conformance.py::test_tlds_country[eu", "tests/test_conformance.py::test_tlds_country[ge", "tests/test_conformance.py::test_tlds_country[\\u092d\\u093e\\u0930\\u094b\\u0924", "tests/test_conformance.py::test_tlds_country[uk", "tests/test_conformance.py::test_tlds_country[\\u4e2d\\u56fd", "tests/test_conformance.py::test_tlds_country[pr", "tests/test_conformance.py::test_tlds_country[cn", "tests/test_conformance.py::test_validate_weighted_tweets_with_discounted_emoji_counter_test[160", "tests/test_conformance.py::test_tlds_country[\\u0cad\\u0cbe\\u0cb0\\u0ca4", "tests/test_conformance.py::test_tlds_country[\\u0431\\u0435\\u043b", "tests/test_conformance.py::test_tlds_country[bi", "tests/test_conformance.py::test_tlds_country[ga", "tests/test_conformance.py::test_tlds_country[bl", "tests/test_conformance.py::test_tlds_country[hk", "tests/test_conformance.py::test_tlds_country[kz", "tests/test_conformance.py::test_tlds_country[gq", "tests/test_conformance.py::test_tlds_country[gb", "tests/test_conformance.py::test_tlds_country[ec", 
"tests/test_conformance.py::test_tlds_country[do", "tests/test_conformance.py::test_tlds_country[\\u0627\\u0644\\u062c\\u0632\\u0627\\u0626\\u0631", "tests/test_conformance.py::test_tlds_country[cu", "tests/test_conformance.py::test_tlds_country[bd", "tests/test_conformance.py::test_tlds_country[bs", "tests/test_conformance.py::test_tlds_country[az", "tests/test_conformance.py::test_tlds_country[qa", "tests/test_conformance.py::test_tlds_country[\\u0627\\u06cc\\u0631\\u0627\\u0646", "tests/test_conformance.py::test_tlds_country[je", "tests/test_conformance.py::test_tlds_country[fi", "tests/test_conformance.py::test_tlds_country[fm", "tests/test_conformance.py::test_tlds_country[aw", "tests/test_conformance.py::test_tlds_country[tp", "tests/test_conformance.py::test_tlds_country[gl", "tests/test_conformance.py::test_tlds_country[\\u0628\\u06be\\u0627\\u0631\\u062a", "tests/test_conformance.py::test_tlds_country[\\u0a2d\\u0a3e\\u0a30\\u0a24", "tests/test_conformance.py::test_tlds_country[am", "tests/test_conformance.py::test_tlds_country[mx", "tests/test_conformance.py::test_validate_weighted_tweets_with_discounted_emoji_counter_test[Allow", "tests/test_conformance.py::test_tlds_country[\\u0639\\u0631\\u0627\\u0642", "tests/test_conformance.py::test_tlds_country[mo", "tests/test_conformance.py::test_tlds_country[tk", "tests/test_conformance.py::test_tlds_country[gd", "tests/test_conformance.py::test_tlds_country[kw", "tests/test_conformance.py::test_tlds_country[\\u0627\\u0645\\u0627\\u0631\\u0627\\u062a", "tests/test_conformance.py::test_tlds_country[\\u0aad\\u0abe\\u0ab0\\u0aa4", "tests/test_conformance.py::test_tlds_country[so", "tests/test_conformance.py::test_tlds_country[\\u0642\\u0637\\u0631", "tests/test_conformance.py::test_tlds_country[hr", "tests/test_conformance.py::test_tlds_country[tl", "tests/test_conformance.py::test_tlds_country[ci", "tests/test_conformance.py::test_tlds_country[dk", "tests/test_conformance.py::test_tlds_country[pt", "tests/test_conformance.py::test_tlds_country[vn", "tests/test_conformance.py::test_tlds_country[gg", "tests/test_conformance.py::test_tlds_country[zm", "tests/test_conformance.py::test_tlds_country[lv", "tests/test_conformance.py::test_extract_urls_with_indices[Properly", "tests/test_conformance.py::test_tlds_country[sg", "tests/test_conformance.py::test_tlds_country[ck", "tests/test_conformance.py::test_tlds_country[my", "tests/test_conformance.py::test_validate_weighted_tweets_with_discounted_emoji_counter_test[Long", "tests/test_conformance.py::test_tlds_country[sc", "tests/test_conformance.py::test_tlds_country[bh", "tests/test_conformance.py::test_tlds_country[lk", "tests/test_conformance.py::test_tlds_country[lr", "tests/test_conformance.py::test_tlds_country[ls", "tests/test_conformance.py::test_tlds_country[ma", "tests/test_conformance.py::test_tlds_country[\\u0c2d\\u0c3e\\u0c30\\u0c24\\u0c4d", "tests/test_conformance.py::test_tlds_country[nc", "tests/test_conformance.py::test_tlds_country[ph", "tests/test_conformance.py::test_tlds_country[gh", "tests/test_conformance.py::test_tlds_country[es", "tests/test_conformance.py::test_tlds_country[nr", "tests/test_conformance.py::test_validate_weighted_tweets_with_discounted_emoji_counter_test[Just", "tests/test_conformance.py::test_tlds_country[ye", "tests/test_conformance.py::test_tlds_country[gu", "tests/test_conformance.py::test_tlds_country[jp", "tests/test_conformance.py::test_tlds_country[\\u0627\\u0644\\u0645\\u063a\\u0631\\u0628", "tests/test_conformance.py::test_tlds_country[mq", 
"tests/test_conformance.py::test_tlds_country[cw", "tests/test_conformance.py::test_tlds_country[co", "tests/test_conformance.py::test_tlds_country[\\u09ad\\u09be\\u09b0\\u09a4", "tests/test_conformance.py::test_tlds_country[pn", "tests/test_conformance.py::test_tlds_country[tj", "tests/test_conformance.py::test_tlds_country[hm", "tests/test_conformance.py::test_tlds_country[as", "tests/test_conformance.py::test_tlds_country[cz", "tests/test_conformance.py::test_tlds_country[\\u53f0\\u7063", "tests/test_conformance.py::test_tlds_country[ve", "tests/test_conformance.py::test_tlds_country[sx", "tests/test_conformance.py::test_tlds_country[ss", "tests/test_conformance.py::test_tlds_country[si", "tests/test_conformance.py::test_tlds_country[sl", "tests/test_conformance.py::test_tlds_country[\\u0628\\u0627\\u0631\\u062a", "tests/test_conformance.py::test_validate_weighted_tweets_with_discounted_emoji_counter_test[140", "tests/test_conformance.py::test_tlds_country[et", "tests/test_conformance.py::test_tlds_country[om", "tests/test_conformance.py::test_tlds_country[su", "tests/test_conformance.py::test_tlds_country[ch", "tests/test_conformance.py::test_tlds_country[pm", "tests/test_conformance.py::test_tlds_country[sa", "tests/test_conformance.py::test_tlds_country[tz", "tests/test_conformance.py::test_tlds_country[\\u09ad\\u09be\\u09f0\\u09a4", "tests/test_conformance.py::test_tlds_country[\\u0645\\u0635\\u0631", "tests/test_conformance.py::test_tlds_country[bz", "tests/test_conformance.py::test_tlds_country[ke", "tests/test_conformance.py::test_tlds_country[\\u03b5\\u03bb", "tests/test_conformance.py::test_tlds_country[fr", "tests/test_conformance.py::test_tlds_country[um", "tests/test_conformance.py::test_tlds_country[an", "tests/test_conformance.py::test_validate_weighted_tweets_with_discounted_emoji_counter_test[Regular", "tests/test_conformance.py::test_tlds_country[kg", "tests/test_conformance.py::test_tlds_country[tn", "tests/test_conformance.py::test_tlds_country[\\u65b0\\u52a0\\u5761", "tests/test_conformance.py::test_tlds_country[im", "tests/test_conformance.py::test_tlds_country[lu", "tests/test_conformance.py::test_tlds_country[er", "tests/test_conformance.py::test_tlds_country[sj", "tests/test_conformance.py::test_tlds_country[ao", "tests/test_conformance.py::test_tlds_country[\\u53f0\\u6e7e", "tests/test_conformance.py::test_tlds_country[mp", "tests/test_conformance.py::test_tlds_country[sd", "tests/test_conformance.py::test_tlds_country[gs", "tests/test_conformance.py::test_tlds_country[jo", "tests/test_conformance.py::test_tlds_country[tr", "tests/test_conformance.py::test_tlds_country[pk", "tests/test_conformance.py::test_tlds_country[mg", "tests/test_conformance.py::test_tlds_country[cm", "tests/test_conformance.py::test_tlds_country[se", "tests/test_conformance.py::test_tlds_country[gt", "tests/test_conformance.py::test_tlds_country[\\u062a\\u0648\\u0646\\u0633", "tests/test_conformance.py::test_tlds_country[td", "tests/test_conformance.py::test_tlds_country[vi", "tests/test_conformance.py::test_tlds_country[th", "tests/test_conformance.py::test_tlds_country[af", "tests/test_conformance.py::test_tlds_country[bm", "tests/test_conformance.py::test_tlds_country[ir", "tests/test_conformance.py::test_tlds_country[uz", "tests/test_conformance.py::test_tlds_country[\\u0645\\u0644\\u064a\\u0633\\u064a\\u0627", "tests/test_conformance.py::test_tlds_country[zw", "tests/test_conformance.py::test_tlds_country[il", "tests/test_conformance.py::test_tlds_country[sk", 
"tests/test_conformance.py::test_validate_weighted_tweets_with_discounted_emoji_counter_test[3", "tests/test_conformance.py::test_tlds_country[to", "tests/test_conformance.py::test_tlds_country[mv", "tests/test_conformance.py::test_tlds_country[ro", "tests/test_conformance.py::test_tlds_country[st", "tests/test_conformance.py::test_tlds_country[\\u0443\\u043a\\u0440", "tests/test_conformance.py::test_tlds_country[dj", "tests/test_conformance.py::test_tlds_country[gr", "tests/test_conformance.py::test_tlds_country[ax", "tests/test_conformance.py::test_tlds_country[ie", "tests/test_conformance.py::test_tlds_country[wf", "tests/test_conformance.py::test_tlds_country[\\u092d\\u093e\\u0930\\u0924\\u092e\\u094d", "tests/test_conformance.py::test_tlds_country[ky", "tests/test_conformance.py::test_tlds_country[lt", "tests/test_conformance.py::test_tlds_country[bt", "tests/test_conformance.py::test_tlds_country[id", "tests/test_conformance.py::test_tlds_country[cg", "tests/test_conformance.py::test_tlds_country[us", "tests/test_conformance.py::test_tlds_country[dz", "tests/test_conformance.py::test_tlds_country[\\u0570\\u0561\\u0575", "tests/test_conformance.py::test_tlds_country[mk", "tests/test_conformance.py::test_tlds_country[is", "tests/test_conformance.py::test_tlds_country[kr", "tests/test_conformance.py::test_tlds_country[bf", "tests/test_conformance.py::test_tlds_country[ba", "tests/test_conformance.py::test_tlds_country[pa", "tests/test_conformance.py::test_tlds_country[\\u10d2\\u10d4", "tests/test_conformance.py::test_tlds_country[gm", "tests/test_conformance.py::test_tlds_country[hn", "tests/test_conformance.py::test_tlds_country[ae", "tests/test_added.py::test_added_parse_tweet[A", "tests/test_added.py::test_added_parse_tweet[Hangul", "tests/test_added.py::test_added_parse_tweet[One", "tests/test_added.py::test_added_extract_urls_with_indices[t.co" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2020-05-24T01:56:24Z"
mit
sybila__eBCSgen-105
diff --git a/eBCSgen/Parsing/ParseBCSL.py b/eBCSgen/Parsing/ParseBCSL.py index 1fc448f..4bb2843 100644 --- a/eBCSgen/Parsing/ParseBCSL.py +++ b/eBCSgen/Parsing/ParseBCSL.py @@ -362,6 +362,10 @@ class TransformAbstractSyntax(Transformer): self.complex_defns = complex_defns def cmplx_name(self, matches): + if str(matches[0]) not in self.complex_defns: + raise ComplexParsingError( + f"Complex alias {matches[0]} not found in defined complexes: {list(self.complex_defns.keys())}", matches + ) return deepcopy(self.complex_defns[str(matches[0])]) def abstract_sequence(self, matches): @@ -544,11 +548,19 @@ class TreeToComplex(Transformer): def structure(self, matches): name = str(matches[0].children[0]) - if len(matches) > 1: - composition = set(matches[1].children) - return StructureAgent(name, composition) - else: + if len(matches) <= 1: return StructureAgent(name, set()) + atomic_names = set() + composition = set() + for atomic in matches[1].children: + if atomic.name in atomic_names: + raise ComplexParsingError( + f"Duplicate atomic agent in structure: {atomic.name}", matches + ) + atomic_names.add(atomic.name) + composition.add(atomic) + + return StructureAgent(name, composition) def rate_complex(self, matches): sequence = []
sybila/eBCSgen
2e3b30105b1b1760a997bbacfc7e8487eb625961
diff --git a/Testing/objects_testing.py b/Testing/objects_testing.py index 71742b5..a324d26 100644 --- a/Testing/objects_testing.py +++ b/Testing/objects_testing.py @@ -58,7 +58,7 @@ u2_c1_u = AtomicAgent("U", "u") # structure s1 = StructureAgent("B", {a1}) s2 = StructureAgent("D", set()) -s3 = StructureAgent("K", {a1, a3, a5}) +s3 = StructureAgent("K", {a1, a3, a11}) s4 = StructureAgent("B", {a4}) s5 = StructureAgent("D", {a5, a6}) s6 = StructureAgent("K", set()) diff --git a/Testing/parsing/test_complex.py b/Testing/parsing/test_complex.py index 77d5d35..e16dfb3 100644 --- a/Testing/parsing/test_complex.py +++ b/Testing/parsing/test_complex.py @@ -6,12 +6,12 @@ def test_parser(): assert ret.success assert ret.data.children[0] == objects.c1 - ret = objects.rate_complex_parser.parse("B(T{s}).D().K(T{s},S{s},S{_})::cell") + ret = objects.rate_complex_parser.parse("B(T{s}).D().K(T{s},S{s},U{a})::cell") assert ret.success assert ret.data.children[0] == objects.c2 ret = objects.rate_complex_parser.parse( - "B(T{s}).K(T{s}, S{s}, S{_}).D(S{_},T{p})::cyt" + "B(T{s}).K(T{s}, S{s}, U{a}).D(S{_},T{p})::cyt" ) assert ret.success assert ret.data.children[0] == objects.c3 @@ -58,3 +58,12 @@ def test_parser(): ret = objects.rate_complex_parser.parse("B(T{s})::") assert not ret.success + + ret = objects.rate_complex_parser.parse("B(T{s}, T{_})::cell") + assert not ret.success + + ret = objects.rate_complex_parser.parse("B(T{s}, T{s})::cell") + assert not ret.success + + ret = objects.rate_complex_parser.parse("B(T{s}, T{a})::cell") + assert not ret.success diff --git a/Testing/parsing/test_side.py b/Testing/parsing/test_side.py index 6034958..908aa9b 100644 --- a/Testing/parsing/test_side.py +++ b/Testing/parsing/test_side.py @@ -13,13 +13,13 @@ def test_parser(): assert ret.data.to_side() == objects.side2 ret = objects.side_parser.parse( - "B(T{s})::cell + B(T{s}).D().K(T{s},S{s},S{_})::cell + B(T{s}).D().K(T{s},S{s},S{_})::cell" + "B(T{s})::cell + B(T{s}).D().K(T{s},S{s},U{a})::cell + B(T{s}).D().K(T{s},S{s},U{a})::cell" ) assert ret.success assert ret.data.to_side() == objects.side3 ret = objects.side_parser.parse( - "B(T{s})::cell + 2 B(T{s}).D().K(T{s},S{s},S{_})::cell" + "B(T{s})::cell + 2 B(T{s}).D().K(T{s},S{s},U{a})::cell" ) assert ret.success assert ret.data.to_side() == objects.side3 @@ -48,3 +48,9 @@ def test_parser(): ret = objects.side_parser.parse("B(T{s}") assert not ret.success + + # not unique atomics in structure + ret = objects.side_parser.parse( + "B(T{s})::cell + B(T{s}).D().K(T{s},S{s},S{_})::cell + B(T{s}).D().K(T{s},S{s},S{_})::cell" + ) + assert not ret.success diff --git a/Testing/parsing/test_structure.py b/Testing/parsing/test_structure.py index bed18af..a7fbfd4 100644 --- a/Testing/parsing/test_structure.py +++ b/Testing/parsing/test_structure.py @@ -4,7 +4,7 @@ import Testing.objects_testing as objects def test_parser(): assert objects.structure_parser.parse("B(T{s})").data == objects.s1 assert objects.structure_parser.parse("D()").data == objects.s2 - assert objects.structure_parser.parse("K(T{s}, S{s}, S{_})").data == objects.s3 + assert objects.structure_parser.parse("K(T{s}, S{s}, U{a})").data == objects.s3 assert objects.structure_parser.parse("B(T{_})").data == objects.s4 assert objects.structure_parser.parse("D(S{_},T{p})").data == objects.s5 assert objects.structure_parser.parse("K()").data == objects.s6 @@ -18,3 +18,6 @@ def test_parser(): assert not objects.structure_parser.parse("[B(T{s})]").success assert not objects.structure_parser.parse("").success assert 
not objects.structure_parser.parse("B({s})").success + assert not objects.structure_parser.parse("B(S{s}, S{a})").success + assert not objects.structure_parser.parse("B(S{a}, S{a})").success + assert not objects.structure_parser.parse("B(S{_}, S{a})").success
Raise an error when an undefined complex alias is used in a rule: raise `ComplexParsingError` when the alias is not defined.
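A minimal sketch of the new behaviour, assuming the `Parser("model")` entry point used elsewhere in the test suite: a rule referencing the alias `A2` without a `#! complexes` definition should no longer slip through as an unhandled `KeyError`. Depending on how the parser surfaces transformer errors, the rejection may appear as a failed result or a raised `ComplexParsingError`:

```python
from eBCSgen.Parsing.ParseBCSL import Parser

# A rule referencing the alias A2 with no "#! complexes" section defining it.
model = "#! rules\nS{i}:A():A2::cell => A()::cell"

try:
    assert not Parser("model").parse(model).success
except Exception as err:  # may also surface as a raised ComplexParsingError
    print(f"rejected as expected: {err}")
```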
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "Testing/parsing/test_complex.py::test_parser", "Testing/parsing/test_side.py::test_parser", "Testing/parsing/test_structure.py::test_parser" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2024-03-21T14:35:33Z"
mit
sybila__eBCSgen-107
diff --git a/eBCSgen/Parsing/ParseBCSL.py b/eBCSgen/Parsing/ParseBCSL.py index 4bb2843..480317f 100644 --- a/eBCSgen/Parsing/ParseBCSL.py +++ b/eBCSgen/Parsing/ParseBCSL.py @@ -3,7 +3,7 @@ import json import numpy as np from numpy import inf from copy import deepcopy -from lark import Lark, Token, Transformer, Tree +from lark import Lark, Transformer, Tree from lark import UnexpectedCharacters, UnexpectedToken, UnexpectedEOF from lark.load_grammar import _TERMINAL_NAMES import regex @@ -116,7 +116,7 @@ GRAMMAR = r""" init: const? rate_complex definition: def_param "=" number - rule: ((label)? side ARROW side ("@" rate)? (";" variable)?) | ((label)? side BI_ARROW side ("@" rate "|" rate )? (";" variable)?) + rule: ((label)? side arrow side ("@" rate)? (";" variable)?) | ((label)? side BI_ARROW side ("@" rate "|" rate )? (";" variable)?) cmplx_dfn: cmplx_name "=" value side: (const? complex "+")* (const? complex)? @@ -131,8 +131,10 @@ GRAMMAR = r""" COM: "//" POW: "**" - ARROW: "=>" + arrow: SINGLE_ARROW | REPLICATION_ARROW + SINGLE_ARROW: "=>" BI_ARROW: "<=>" + REPLICATION_ARROW: "=*>" RULES_START: "#! rules" INITS_START: "#! inits" DEFNS_START: "#! definitions" @@ -647,18 +649,21 @@ class TreeToObjects(Transformer): ) ) pairs = [(i, i + lhs.counter) for i in range(min(lhs.counter, rhs.counter))] - if lhs.counter > rhs.counter: + if type(arrow) is Tree and arrow.children[0].value == "=*>": + if lhs.counter >= rhs.counter or lhs.counter != 1 or rhs.counter <= 1: + raise UnspecifiedParsingError("Rule does not contain replication") + + for i in range(lhs.counter, rhs.counter): + if lhs.seq[pairs[-1][0]] == rhs.seq[pairs[-1][1] - lhs.counter]: + if rhs.seq[pairs[-1][1] - lhs.counter] == rhs.seq[i]: + pairs += [(pairs[-1][0], i + lhs.counter)] + else: + raise UnspecifiedParsingError("Rule does not contain replication") + + elif lhs.counter > rhs.counter: pairs += [(i, None) for i in range(rhs.counter, lhs.counter)] elif lhs.counter < rhs.counter: - for i in range(lhs.counter, rhs.counter): - replication = False - if lhs.counter == 1 and rhs.counter > 1: - if lhs.seq[pairs[-1][0]] == rhs.seq[pairs[-1][1] - lhs.counter]: - if rhs.seq[pairs[-1][1] - lhs.counter] == rhs.seq[i]: - pairs += [(pairs[-1][0], i + lhs.counter)] - replication = True - if not replication: - pairs += [(None, i + lhs.counter)] + pairs += [(None, i + lhs.counter) for i in range(lhs.counter, rhs.counter)] reversible = False if arrow == "<=>":
sybila/eBCSgen
902a02bc29b7e7da70ba34b5f96bf592832c63cd
diff --git a/Testing/objects_testing.py b/Testing/objects_testing.py index a324d26..a39e1fc 100644 --- a/Testing/objects_testing.py +++ b/Testing/objects_testing.py @@ -352,6 +352,35 @@ rule_no_change = Rule( sequence_no_change, mid_c1, compartments_c1, complexes_c1, pairs_c1, rate_c1 ) +sequence_repl1 = (s31, s31, s31) +mid_repl1 = 1 +compartments_repl1 = ["rep"] * 3 +complexes_repl1 = [(0, 0), (1, 1), (2, 2)] +pairs_repl1 = [(0, 1), (0, 2)] +rate_repl1 = Rate("3.0*[X()::rep]/2.0*v_1") + +rule_repl1 = Rule( + sequence_repl1, mid_repl1, compartments_repl1, complexes_repl1, pairs_repl1, None +) +rule_repl1_rate = Rule( + sequence_repl1, + mid_repl1, + compartments_repl1, + complexes_repl1, + pairs_repl1, + rate_repl1, +) + +repl_sequence2 = (s31, s31, s31, s31) +mid_repl2 = 1 +compartments_repl2 = ["rep"] * 4 +complexes_repl2 = [(0, 0), (1, 1), (2, 2), (3, 3)] +pairs_repl2 = [(0, 1), (0, 2), (0, 3)] + +rule_repl2 = Rule( + repl_sequence2, mid_repl2, compartments_repl2, complexes_repl2, pairs_repl2, None +) + # reactions reaction1 = Reaction(lhs, rhs, rate_5) diff --git a/Testing/parsing/test_rule.py b/Testing/parsing/test_rule.py index e3294e4..3ce55df 100644 --- a/Testing/parsing/test_rule.py +++ b/Testing/parsing/test_rule.py @@ -1,6 +1,7 @@ import pytest import Testing.objects_testing as objects +from eBCSgen.Core.Rate import Rate def test_parser(): @@ -70,3 +71,27 @@ def test_bidirectional(): rule_expr = "#! rules\nK(S{u}).B()::cyt => K(S{p})::cyt + B()::cyt @ 3*[K()::cyt]/2*v_1 | 2*[K()::cyt]/3*v_1" assert not objects.rules_parser.parse(rule_expr).success + + +def test_replication(): + rule_expr = "X()::rep =*> X()::rep + X()::rep" + result = objects.rule_parser.parse(rule_expr) + assert result.success + assert result.data[1] == objects.rule_repl1 + + rule_expr = "X()::rep =*> X()::rep + X()::rep @ 3*[X()::rep]/2*v_1" + result = objects.rule_parser.parse(rule_expr) + assert result.success + rate_repl1 = Rate("3.0*[X()::rep]/2*v_1") + assert result.data[1] == objects.rule_repl1_rate + + rule_expr = "X()::rep =*> X()::rep + X()::rep + X()::rep" + result = objects.rule_parser.parse(rule_expr) + assert result.success + assert result.data[1] == objects.rule_repl2 + + rule_expr = "X()::rep + Y()::rep =*> X()::rep + X()::rep" + assert not objects.rule_parser.parse(rule_expr).success + + rule_expr = "X()::rep =*> X()::rep + X()::rep + Y()::rep" + assert not objects.rule_parser.parse(rule_expr).success
Replication rules: this feature is quite hardcoded; the first question is whether we want to support it at all, then how to do it in a robust way.
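The added tests pin down the new syntax; a sketch of the replication arrow, assuming the `Parser("rule")` helper they use:

```python
from eBCSgen.Parsing.ParseBCSL import Parser

rule_parser = Parser("rule")

# A single left-hand agent may be replicated on the right with =*>.
assert rule_parser.parse("X()::rep =*> X()::rep + X()::rep").success

# Anything that is not a pure replication of one agent is rejected.
assert not rule_parser.parse("X()::rep + Y()::rep =*> X()::rep + X()::rep").success
assert not rule_parser.parse("X()::rep =*> X()::rep + X()::rep + Y()::rep").success
```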
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "Testing/parsing/test_rule.py::test_replication" ]
[ "Testing/parsing/test_rule.py::test_parser", "Testing/parsing/test_rule.py::test_bidirectional" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-04-15T12:01:00Z"
mit
sybila__eBCSgen-109
diff --git a/eBCSgen/Parsing/ParseBCSL.py b/eBCSgen/Parsing/ParseBCSL.py index 4bb2843..8368f44 100644 --- a/eBCSgen/Parsing/ParseBCSL.py +++ b/eBCSgen/Parsing/ParseBCSL.py @@ -3,7 +3,7 @@ import json import numpy as np from numpy import inf from copy import deepcopy -from lark import Lark, Token, Transformer, Tree +from lark import Lark, Transformer, Tree from lark import UnexpectedCharacters, UnexpectedToken, UnexpectedEOF from lark.load_grammar import _TERMINAL_NAMES import regex @@ -106,17 +106,18 @@ class SideHelper: GRAMMAR = r""" model: (sections)* rules (sections | rules)* - sections: inits | definitions | complexes | regulation + sections: inits | definitions | complexes | regulation | observables rules: RULES_START _NL+ (rule _NL+)* rule _NL* inits: INITS_START _NL+ (init _NL+)* init _NL* definitions: DEFNS_START _NL+ (definition _NL+)* definition _NL* complexes: COMPLEXES_START _NL+ (cmplx_dfn _NL+)* cmplx_dfn _NL* regulation: REGULATION_START _NL+ regulation_def _NL* + observables: OBSERVABLES_START _NL+ (observable _NL+)* observable _NL* init: const? rate_complex definition: def_param "=" number - rule: ((label)? side ARROW side ("@" rate)? (";" variable)?) | ((label)? side BI_ARROW side ("@" rate "|" rate )? (";" variable)?) + rule: ((label)? side arrow side ("@" rate)? (";" variable)?) | ((label)? side BI_ARROW side ("@" rate "|" rate )? (";" variable)?) cmplx_dfn: cmplx_name "=" value side: (const? complex "+")* (const? complex)? @@ -131,13 +132,16 @@ GRAMMAR = r""" COM: "//" POW: "**" - ARROW: "=>" + arrow: SINGLE_ARROW | REPLICATION_ARROW + SINGLE_ARROW: "=>" BI_ARROW: "<=>" + REPLICATION_ARROW: "=*>" RULES_START: "#! rules" INITS_START: "#! inits" DEFNS_START: "#! definitions" COMPLEXES_START: "#! complexes" REGULATION_START: "#! regulation" + OBSERVABLES_START: "#! 
observables" _NL: /(\r?\n[\t ]*)+/ !label: CNAME "~" @@ -239,6 +243,11 @@ REGEX_GRAMMAR = r""" REGEX_CHAR: /[^\\^$().*+?{}\[\]|]/ """ +OBSERVABLES_GRAMMAR = """ + observable: CNAME ":" observable_pattern + !observable_pattern: const | complex | observable_pattern "+" observable_pattern | observable_pattern "-" observable_pattern | observable_pattern "*" observable_pattern | observable_pattern "/" observable_pattern | observable_pattern POW const | "(" observable_pattern ")" +""" + class TransformRegulations(Transformer): def regulation(self, matches): @@ -647,31 +656,38 @@ class TreeToObjects(Transformer): ) ) pairs = [(i, i + lhs.counter) for i in range(min(lhs.counter, rhs.counter))] - if lhs.counter > rhs.counter: + if type(arrow) is Tree and arrow.children[0].value == "=*>": + if lhs.counter >= rhs.counter or lhs.counter != 1 or rhs.counter <= 1: + raise UnspecifiedParsingError("Rule does not contain replication") + + for i in range(lhs.counter, rhs.counter): + if lhs.seq[pairs[-1][0]] == rhs.seq[pairs[-1][1] - lhs.counter]: + if rhs.seq[pairs[-1][1] - lhs.counter] == rhs.seq[i]: + pairs += [(pairs[-1][0], i + lhs.counter)] + else: + raise UnspecifiedParsingError("Rule does not contain replication") + + elif lhs.counter > rhs.counter: pairs += [(i, None) for i in range(rhs.counter, lhs.counter)] elif lhs.counter < rhs.counter: - for i in range(lhs.counter, rhs.counter): - replication = False - if lhs.counter == 1 and rhs.counter > 1: - if lhs.seq[pairs[-1][0]] == rhs.seq[pairs[-1][1] - lhs.counter]: - if rhs.seq[pairs[-1][1] - lhs.counter] == rhs.seq[i]: - pairs += [(pairs[-1][0], i + lhs.counter)] - replication = True - if not replication: - pairs += [(None, i + lhs.counter)] + pairs += [(None, i + lhs.counter) for i in range(lhs.counter, rhs.counter)] reversible = False if arrow == "<=>": reversible = True - return reversible, Rule( - agents, - mid, - compartments, - complexes, - pairs, - Rate(rate1) if rate1 else None, - label, - ), Rate(rate2) if rate2 else None + return ( + reversible, + Rule( + agents, + mid, + compartments, + complexes, + pairs, + Rate(rate1) if rate1 else None, + label, + ), + Rate(rate2) if rate2 else None, + ) def rules(self, matches): rules = [] @@ -703,6 +719,15 @@ class TreeToObjects(Transformer): result[init[0].children[0]] = 1 return {"inits": result} + def observable(self, matches): + return {str(matches[0]): matches[1].children} + + def observables(self, matches): + result = dict() + for observable in matches[1:]: + result.update(observable) + return {"observables": result} + def param(self, matches): self.params.add(str(matches[0])) return Tree("param", matches) @@ -712,6 +737,7 @@ class TreeToObjects(Transformer): definitions = dict() regulation = None inits = collections.Counter() + observables = dict() for match in matches: if type(match) == dict: key, value = list(match.items())[0] @@ -728,6 +754,8 @@ class TreeToObjects(Transformer): inits.update(value) elif key == "definitions": definitions.update(value) + elif key == "observables": + observables.update(value) elif key == "regulation": if regulation: raise UnspecifiedParsingError("Multiple regulations") @@ -749,9 +777,13 @@ class Parser: + EXTENDED_GRAMMAR + REGULATIONS_GRAMMAR + REGEX_GRAMMAR + + OBSERVABLES_GRAMMAR ) self.parser = Lark( - grammar, parser="earley", propagate_positions=False, maybe_placeholders=False + grammar, + parser="earley", + propagate_positions=False, + maybe_placeholders=False, ) self.terminals = dict((v, k) for k, v in _TERMINAL_NAMES.items()) @@ -856,7 +888,7 @@ class 
Parser: return Result( False, { - "unexpected": str(u.token), + "unexpected": str(u.token), "expected": self.replace(u.expected), "line": u.line, "column": u.column,
sybila/eBCSgen
902a02bc29b7e7da70ba34b5f96bf592832c63cd
diff --git a/Testing/objects_testing.py b/Testing/objects_testing.py index a324d26..171ba58 100644 --- a/Testing/objects_testing.py +++ b/Testing/objects_testing.py @@ -22,6 +22,8 @@ rate_complex_parser = Parser("rate_complex") rule_parser = Parser("rule") rules_parser = Parser("rules") model_parser = Parser("model") +observables_parser = Parser("observables") +observable_parser = Parser("observable") # atomic a1 = AtomicAgent("T", "s") @@ -352,6 +354,35 @@ rule_no_change = Rule( sequence_no_change, mid_c1, compartments_c1, complexes_c1, pairs_c1, rate_c1 ) +sequence_repl1 = (s31, s31, s31) +mid_repl1 = 1 +compartments_repl1 = ["rep"] * 3 +complexes_repl1 = [(0, 0), (1, 1), (2, 2)] +pairs_repl1 = [(0, 1), (0, 2)] +rate_repl1 = Rate("3.0*[X()::rep]/2.0*v_1") + +rule_repl1 = Rule( + sequence_repl1, mid_repl1, compartments_repl1, complexes_repl1, pairs_repl1, None +) +rule_repl1_rate = Rule( + sequence_repl1, + mid_repl1, + compartments_repl1, + complexes_repl1, + pairs_repl1, + rate_repl1, +) + +repl_sequence2 = (s31, s31, s31, s31) +mid_repl2 = 1 +compartments_repl2 = ["rep"] * 4 +complexes_repl2 = [(0, 0), (1, 1), (2, 2), (3, 3)] +pairs_repl2 = [(0, 1), (0, 2), (0, 3)] + +rule_repl2 = Rule( + repl_sequence2, mid_repl2, compartments_repl2, complexes_repl2, pairs_repl2, None +) + # reactions reaction1 = Reaction(lhs, rhs, rate_5) diff --git a/Testing/parsing/test_observables.py b/Testing/parsing/test_observables.py new file mode 100644 index 0000000..75340a9 --- /dev/null +++ b/Testing/parsing/test_observables.py @@ -0,0 +1,67 @@ +import pytest + +import Testing.objects_testing as objects + + +def test_parser(): + observable_expr1 = "abc: A()::cell" + assert objects.observable_parser.parse(observable_expr1) + + observable_expr2 = "efg: E(F{_})::cell" + assert objects.observable_parser.parse(observable_expr2) + + observable_expr3 = "hij: H()::cell" + assert objects.observable_parser.parse(observable_expr3) + + observable_expr4 = "klm: K()::cyt * L()::cell + M()::cell" + assert objects.observable_parser.parse(observable_expr4) + + observable_expr5 = "nop: N()::cell" + assert objects.observable_parser.parse(observable_expr5).success + + observable_expr6 = "qrs: Q().R().S()::cell" + assert objects.observable_parser.parse(observable_expr6).success + + observable_expr7 = "tuv: T(U{v})::cell " + assert objects.observable_parser.parse(observable_expr7).success + + observable_expr8 = "wx: 2 * W{x}::cell" + assert objects.observable_parser.parse(observable_expr8).success + + observable_expr9 = "z: Y{z}::cyt + Z{y}::ext" + assert objects.observable_parser.parse(observable_expr9).success + + observable_expr10 = "z: 2 * Y{z}::cyt + Z{y}::ext ** 2" + assert objects.observable_parser.parse(observable_expr10).success + + observable_expr10 = "z: (Y{z}::cell + Z{y}::cyt) / 2.1 ** 10" + assert objects.observable_parser.parse(observable_expr10).success + + observable_expr11 = "scaled_A: 1000 * A{i}::cell" + assert objects.observable_parser.parse(observable_expr11).success + + observable_expr12 = "obs_A_all: A{i}::cell + A{a}::cell" + assert objects.observable_parser.parse(observable_expr12).success + + observables_expr = ( + "#! 
observables\n" + + observable_expr1 + + "\n" + + observable_expr2 + + "\n" + + observable_expr3 + + "\n" + + observable_expr4 + + "\n" + + observable_expr5 + + "\n" + + observable_expr6 + ) + assert objects.observables_parser.parse(observables_expr).success + + assert not objects.observable_parser.parse("A()::cell > 2").success + assert not objects.observable_parser.parse("a: A(::cell").success + assert not objects.observable_parser.parse("a: b: A():cell > 2").success + assert not objects.observable_parser.parse("a: 2 > A():cell").success + assert not objects.observable_parser.parse("a: A()::cell$").success + assert not objects.observable_parser.parse("a: A{}::cell").success diff --git a/Testing/parsing/test_rule.py b/Testing/parsing/test_rule.py index e3294e4..3ce55df 100644 --- a/Testing/parsing/test_rule.py +++ b/Testing/parsing/test_rule.py @@ -1,6 +1,7 @@ import pytest import Testing.objects_testing as objects +from eBCSgen.Core.Rate import Rate def test_parser(): @@ -70,3 +71,27 @@ def test_bidirectional(): rule_expr = "#! rules\nK(S{u}).B()::cyt => K(S{p})::cyt + B()::cyt @ 3*[K()::cyt]/2*v_1 | 2*[K()::cyt]/3*v_1" assert not objects.rules_parser.parse(rule_expr).success + + +def test_replication(): + rule_expr = "X()::rep =*> X()::rep + X()::rep" + result = objects.rule_parser.parse(rule_expr) + assert result.success + assert result.data[1] == objects.rule_repl1 + + rule_expr = "X()::rep =*> X()::rep + X()::rep @ 3*[X()::rep]/2*v_1" + result = objects.rule_parser.parse(rule_expr) + assert result.success + rate_repl1 = Rate("3.0*[X()::rep]/2*v_1") + assert result.data[1] == objects.rule_repl1_rate + + rule_expr = "X()::rep =*> X()::rep + X()::rep + X()::rep" + result = objects.rule_parser.parse(rule_expr) + assert result.success + assert result.data[1] == objects.rule_repl2 + + rule_expr = "X()::rep + Y()::rep =*> X()::rep + X()::rep" + assert not objects.rule_parser.parse(rule_expr).success + + rule_expr = "X()::rep =*> X()::rep + X()::rep + Y()::rep" + assert not objects.rule_parser.parse(rule_expr).success
Define observables - pools, scaling (basic arithmetic).
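A sketch of the new section syntax, using examples from the added tests and the `Parser("observables")` helper they define:

```python
from eBCSgen.Parsing.ParseBCSL import Parser

# An observable is a named arithmetic expression over complexes:
# scaling with constants, pooling with +, and ** for powers.
observables = """#! observables
scaled_A: 1000 * A{i}::cell
obs_A_all: A{i}::cell + A{a}::cell
"""

assert Parser("observables").parse(observables).success
```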
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "Testing/parsing/test_observables.py::test_parser", "Testing/parsing/test_rule.py::test_parser", "Testing/parsing/test_rule.py::test_bidirectional", "Testing/parsing/test_rule.py::test_replication" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-04-16T14:38:35Z"
mit
sybila__eBCSgen-92
diff --git a/eBCSgen/Parsing/ParseBCSL.py b/eBCSgen/Parsing/ParseBCSL.py index a9ac674..e70646c 100644 --- a/eBCSgen/Parsing/ParseBCSL.py +++ b/eBCSgen/Parsing/ParseBCSL.py @@ -156,9 +156,9 @@ GRAMMAR = r""" EXTENDED_GRAMMAR = """ abstract_sequence: atomic_complex | atomic_structure_complex | structure_complex - atomic_complex: atomic ":" (cmplx_name|VAR) - atomic_structure_complex: atomic ":" structure ":" (cmplx_name|VAR) - structure_complex: structure ":" (cmplx_name|VAR) + atomic_complex: atomic ":" (VAR | value) + atomic_structure_complex: atomic ":" structure ":" (VAR | value) + structure_complex: structure ":" (VAR | value) variable: VAR "=" "{" cmplx_name ("," cmplx_name)+ "}" VAR: "?" @@ -401,18 +401,23 @@ class TransformAbstractSyntax(Transformer): Raises: ComplexParsingError: If no matching struct is found in the complex. """ - for i in range(len(complex.children)): - if self.get_name(struct) == self.get_name(complex.children[i].children[0]): + if isinstance(complex.children[0].children[0].children[0].children[0], Tree): + search = complex.children[0] + else: + search = complex + + for i in range(len(search.children)): + if self.get_name(struct) == self.get_name(search.children[i].children[0]): struct_found = True # search same name structs - if they contain atomics with matching names, they are considered incompatible for j in range(len(struct.children[1].children)): for k in range( - len(complex.children[i].children[0].children[1].children) + len(search.children[i].children[0].children[1].children) ): if self.get_name( struct.children[1].children[j] ) == self.get_name( - complex.children[i].children[0].children[1].children[k] + search.children[i].children[0].children[1].children[k] ): struct_found = False break @@ -422,13 +427,11 @@ class TransformAbstractSyntax(Transformer): if struct_found: # if the complex's struct is empty, replace it with the struct - if self.is_empty(complex.children[i]): - complex.children[i] = Tree("agent", [struct]) + if self.is_empty(search.children[i]): + search.children[i] = Tree("agent", [struct]) else: # if the complex's struct is not empty merge the struct's children into the complex's struct - complex.children[i].children[0].children[ - 1 - ].children += struct.children[1].children + search.children[i].children[0].children[1].children += struct.children[1].children return complex raise ComplexParsingError( @@ -450,10 +453,15 @@ class TransformAbstractSyntax(Transformer): Raises: ComplexParsingError: If an atomic with the same name is already present in the complex. """ - for i in range(len(complex.children)): - if self.get_name(atomic) == self.get_name(complex.children[i].children[0]): - if self.is_empty(complex.children[i].children[0]): - complex.children[i] = Tree("agent", [atomic]) + if isinstance(complex.children[0].children[0].children[0].children[0], Tree): + search = complex.children[0] + else: + search = complex + + for i in range(len(search.children)): + if self.get_name(atomic) == self.get_name(search.children[i].children[0]): + if self.is_empty(search.children[i].children[0]): + search.children[i] = Tree("agent", [atomic]) return complex raise ComplexParsingError( f"Illegal atomic nesting or duplication: {atomic}:{complex}", complex
sybila/eBCSgen
230b3745b6ffaf930f9f30907925041856fb4cdd
diff --git a/Testing/models/model_cmplx_in_abstr_seq1.txt b/Testing/models/model_cmplx_in_abstr_seq1.txt new file mode 100644 index 0000000..f03c61d --- /dev/null +++ b/Testing/models/model_cmplx_in_abstr_seq1.txt @@ -0,0 +1,5 @@ +#! rules +S{i}:A():A2::cell => A()::cell + +#! complexes +A2 = A().A() diff --git a/Testing/models/model_cmplx_in_abstr_seq2.txt b/Testing/models/model_cmplx_in_abstr_seq2.txt new file mode 100644 index 0000000..801026b --- /dev/null +++ b/Testing/models/model_cmplx_in_abstr_seq2.txt @@ -0,0 +1,2 @@ +#! rules +S{i}:A():A().A()::cell => A()::cell diff --git a/Testing/parsing/test_cmplx_in_abstr_seq.py b/Testing/parsing/test_cmplx_in_abstr_seq.py new file mode 100644 index 0000000..068f47d --- /dev/null +++ b/Testing/parsing/test_cmplx_in_abstr_seq.py @@ -0,0 +1,17 @@ +import pytest + +from Testing.models.get_model_str import get_model_str +import Testing.objects_testing as objects + + +def test_complexes_in_abstract_sequence(): + # is allowed + model = get_model_str("model_cmplx_in_abstr_seq1") + ret1 = objects.model_parser.parse(model) + assert ret1.success + + # should be allowed + model = get_model_str("model_cmplx_in_abstr_seq2") + ret2 = objects.model_parser.parse(model) + assert ret2.success + assert ret1.data == ret2.data
Use complexes directly in `abstract_sequence` Allow usage of complexes directly in `abstract_sequence` - currently only aliases are allowed in place of a complex
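For illustration only, a minimal Lark sketch (a hypothetical mini-grammar, not eBCSgen's actual one) of the requested behaviour: accept either an alias name or an inline complex after the colon in an abstract sequence.

```python
# pip install lark
from lark import Lark

# Hypothetical mini-grammar: after ":" accept either an alias name (e.g. A2)
# or an inline complex (e.g. A().A()), mirroring the idea of the request.
grammar = r"""
    abstract_sequence: atomic ":" (alias | complex)
    complex: atomic ("." atomic)+
    alias: NAME
    atomic: NAME "(" ")"
    NAME: /[A-Za-z][A-Za-z0-9]*/
    %import common.WS
    %ignore WS
"""

parser = Lark(grammar, start="abstract_sequence")

print(parser.parse("S():A2").pretty())        # alias form
print(parser.parse("S():A().A()").pretty())   # inline complex form
```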
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "Testing/parsing/test_cmplx_in_abstr_seq.py::test_complexes_in_abstract_sequence" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-01-31T09:00:55Z"
mit
sybila__eBCSgen-96
diff --git a/eBCSgen/Core/Rule.py b/eBCSgen/Core/Rule.py index 9faa67f..2bd5932 100644 --- a/eBCSgen/Core/Rule.py +++ b/eBCSgen/Core/Rule.py @@ -15,7 +15,16 @@ def column(lst, index): class Rule: - def __init__(self, agents: tuple, mid: int, compartments: list, complexes: list, pairs: list, rate: Rate, label=None): + def __init__( + self, + agents: tuple, + mid: int, + compartments: list, + complexes: list, + pairs: list, + rate: Rate, + label=None, + ): """ Class to represent BCSL rule @@ -35,9 +44,15 @@ class Rule: self.label = label self.comment = (False, []) - def __eq__(self, other: 'Rule'): - return self.agents == other.agents and self.mid == other.mid and self.compartments == other.compartments and \ - self.complexes == other.complexes and self.pairs == other.pairs and str(self.rate) == str(other.rate) + def __eq__(self, other: "Rule"): + return ( + self.agents == other.agents + and self.mid == other.mid + and self.compartments == other.compartments + and self.complexes == other.complexes + and self.pairs == other.pairs + and str(self.rate) == str(other.rate) + ) def __repr__(self): return str(self) @@ -47,14 +62,23 @@ class Rule: rate = " @ " + str(self.rate) if self.rate else "" pre_comment, post_comment = "", "" if self.comment[1]: - comment = "// redundant #{" + ", ".join(list(map(str, self.comment[1]))) + "} " + comment = ( + "// redundant #{" + ", ".join(list(map(str, self.comment[1]))) + "} " + ) pre_comment = comment + "// " if self.comment[0] else "" post_comment = " " + comment if not self.comment[0] else "" label = str(self.label) + " ~ " if self.label else "" - return label + pre_comment + " + ".join(lhs.to_list_of_strings()) + \ - " => " + " + ".join(rhs.to_list_of_strings()) + rate + post_comment + return ( + label + + pre_comment + + " + ".join(lhs.to_list_of_strings()) + + " => " + + " + ".join(rhs.to_list_of_strings()) + + rate + + post_comment + ) def __lt__(self, other): return str(self) < str(other) @@ -70,10 +94,12 @@ class Rule: :return: dict of {Complexes:{SBML codes of all isomorphisms in set}} """ unique_complexes_from_rule = dict() - for (f, t) in self.complexes: - c = Complex(self.agents[f:t + 1], self.compartments[f]) - double = (c, c.to_SBML_species_code()) - unique_complexes_from_rule[c] = unique_complexes_from_rule.get(c, set()) | {double} + for f, t in self.complexes: + c = Complex(self.agents[f : t + 1], self.compartments[f]) + double = (c, c.to_SBML_species_code()) + unique_complexes_from_rule[c] = unique_complexes_from_rule.get(c, set()) | { + double + } return unique_complexes_from_rule def create_complexes(self): @@ -83,8 +109,8 @@ class Rule: :return: two multisets of Complexes represented as object Side """ lhs, rhs = [], [] - for (f, t) in self.complexes: - c = Complex(self.agents[f:t + 1], self.compartments[f]) + for f, t in self.complexes: + c = Complex(self.agents[f : t + 1], self.compartments[f]) lhs.append(c) if t < self.mid else rhs.append(c) return Side(lhs), Side(rhs) @@ -108,7 +134,9 @@ class Rule: if self.rate: self.rate.vectorize(ordering, definitions) - def create_reactions(self, atomic_signature: dict, structure_signature: dict) -> set: + def create_reactions( + self, atomic_signature: dict, structure_signature: dict + ) -> set: """ Create all possible reactions. Decide if rule is of replication type and call corresponding lower level method. 
@@ -118,13 +146,21 @@ class Rule: :return: set of created reactions """ unique_lhs_indices = set(column(self.pairs, 0)) - if len(self.pairs) > 1 and len(unique_lhs_indices) == 1 and None not in unique_lhs_indices: + if ( + len(self.pairs) > 1 + and len(unique_lhs_indices) == 1 + and None not in unique_lhs_indices + ): # should be the replication rule - return self._create_replication_reactions(atomic_signature, structure_signature) + return self._create_replication_reactions( + atomic_signature, structure_signature + ) else: return self._create_normal_reactions(atomic_signature, structure_signature) - def _create_replication_reactions(self, atomic_signature: dict, structure_signature: dict) -> set: + def _create_replication_reactions( + self, atomic_signature: dict, structure_signature: dict + ) -> set: """ Create reaction from rule of special form for replication (A -> 2 A) @@ -144,13 +180,22 @@ class Rule: # replicate RHS agent n times for _ in range(len(self.pairs)): new_agents.append(deepcopy(new_agents[-1])) - new_rule = Rule(tuple(new_agents), self.mid, self.compartments, - self.complexes, self.pairs, self.rate, self.label) + new_rule = Rule( + tuple(new_agents), + self.mid, + self.compartments, + self.complexes, + self.pairs, + self.rate, + self.label, + ) reactions.add(new_rule.to_reaction()) return reactions - def _create_normal_reactions(self, atomic_signature: dict, structure_signature: dict) -> set: + def _create_normal_reactions( + self, atomic_signature: dict, structure_signature: dict + ) -> set: """ Adds context to all agents and generated all possible combinations. Then, new rules with these enhances agents are generated and converted to Reactions. @@ -160,7 +205,7 @@ class Rule: :return: set of created reactions """ results = [] - for (l, r) in self.pairs: + for l, r in self.pairs: if l is None: right = -1 left = self.agents[r] @@ -170,17 +215,27 @@ class Rule: else: left = self.agents[l] right = self.agents[r] - results.append(left.add_context(right, atomic_signature, structure_signature)) + results.append( + left.add_context(right, atomic_signature, structure_signature) + ) reactions = set() for result in itertools.product(*results): new_agents = tuple(filter(None, column(result, 0) + column(result, 1))) - new_rule = Rule(new_agents, self.mid, self.compartments, self.complexes, self.pairs, self.rate, self.label) + new_rule = Rule( + new_agents, + self.mid, + self.compartments, + self.complexes, + self.pairs, + self.rate, + self.label, + ) reactions.add(new_rule.to_reaction()) return reactions - def compatible(self, other: 'Rule') -> bool: + def compatible(self, other: "Rule") -> bool: """ Checks whether Rule is compatible (position-wise) with the other Rule. Is done by formaly translating to Reactions (just a better object handling). 
@@ -201,7 +256,14 @@ class Rule: """ new_agents = tuple([agent.reduce_context() for agent in self.agents]) new_rate = self.rate.reduce_context() if self.rate else None - return Rule(new_agents, self.mid, self.compartments, self.complexes, self.pairs, new_rate) + return Rule( + new_agents, + self.mid, + self.compartments, + self.complexes, + self.pairs, + new_rate, + ) def is_meaningful(self) -> bool: """ @@ -231,7 +293,9 @@ class Rule: :param structure_signature: given structure signature :return: set of all created Complexes """ - return self.to_reaction().create_all_compatible(atomic_signature, structure_signature) + return self.to_reaction().create_all_compatible( + atomic_signature, structure_signature + ) def evaluate_rate(self, state, params): """ @@ -242,7 +306,7 @@ class Rule: :return: a real number of the rate """ values = dict() - for (state_complex, count) in state.content.value.items(): + for state_complex, count in state.content.value.items(): for agent in self.rate_agents: if agent.compatible(state_complex): values[agent] = values.get(agent, 0) + count @@ -277,16 +341,24 @@ class Rule: # replace respective agents unique_lhs_indices = set(column(self.pairs, 0)) - if len(self.pairs) > 1 and len(unique_lhs_indices) == 1 and \ - None not in unique_lhs_indices and len(aligned_match) == 1: + if ( + len(self.pairs) > 1 + and len(unique_lhs_indices) == 1 + and None not in unique_lhs_indices + and len(aligned_match) == 1 + ): resulting_rhs = self._replace_replicated_rhs(aligned_match[0]) else: resulting_rhs = self._replace_normal_rhs(aligned_match) # construct resulting complexes output_complexes = [] - for (f, t) in list(filter(lambda item: item[0] >= self.mid, self.complexes)): - output_complexes.append(Complex(resulting_rhs[f - self.mid:t - self.mid + 1], self.compartments[f])) + for f, t in list(filter(lambda item: item[0] >= self.mid, self.complexes)): + output_complexes.append( + Complex( + resulting_rhs[f - self.mid : t - self.mid + 1], self.compartments[f] + ) + ) return Multiset(collections.Counter(output_complexes)) @@ -298,7 +370,7 @@ class Rule: :return: RHS with replaced agents """ resulting_rhs = [] - for i, rhs_agent in enumerate(self.agents[self.mid:]): + for i, rhs_agent in enumerate(self.agents[self.mid :]): if len(aligned_match) <= i: resulting_rhs.append(rhs_agent) else: @@ -329,11 +401,11 @@ class Rule: :return: multiset of constructed agents """ output_complexes = [] - for (f, t) in list(filter(lambda item: item[1] < self.mid, self.complexes)): - output_complexes.append(Complex(match[f:t + 1], self.compartments[f])) + for f, t in list(filter(lambda item: item[1] < self.mid, self.complexes)): + output_complexes.append(Complex(match[f : t + 1], self.compartments[f])) return Multiset(collections.Counter(output_complexes)) - def create_reversible(self): + def create_reversible(self, rate: Rate = None): """ Create a reversible version of the rule with _bw label. 
@@ -343,19 +415,22 @@ class Rule: :return: reversed Rule """ - agents = self.agents[self.mid:] + self.agents[:self.mid] + agents = self.agents[self.mid :] + self.agents[: self.mid] mid = len(self.agents) - self.mid - compartments = self.compartments[self.mid:] + self.compartments[:self.mid] - complexes = sorted([((f - self.mid) % len(self.agents), - (t - self.mid) % len(self.agents)) for (f, t) in self.complexes]) + compartments = self.compartments[self.mid :] + self.compartments[: self.mid] + complexes = sorted( + [ + ((f - self.mid) % len(self.agents), (t - self.mid) % len(self.agents)) + for (f, t) in self.complexes + ] + ) pairs = [] - for (l, r) in self.pairs: + for l, r in self.pairs: if l is None or r is None: pairs.append((r, l)) else: pairs.append((l, r)) - rate = self.rate label = None if self.label: label = self.label + "_bw" diff --git a/eBCSgen/Parsing/ParseBCSL.py b/eBCSgen/Parsing/ParseBCSL.py index e70646c..990fe7a 100644 --- a/eBCSgen/Parsing/ParseBCSL.py +++ b/eBCSgen/Parsing/ParseBCSL.py @@ -114,7 +114,7 @@ GRAMMAR = r""" init: const? rate_complex (COMMENT)? definition: def_param "=" number (COMMENT)? - rule: (label)? side ARROW side ("@" rate)? (";" variable)? (COMMENT)? + rule: ((label)? side ARROW side ("@" rate)? (";" variable)? (COMMENT)?) | ((label)? side BI_ARROW side ("@" rate "|" rate)? (";" variable)? (COMMENT)?) cmplx_dfn: cmplx_name "=" value (COMMENT)? side: (const? complex "+")* (const? complex)? @@ -129,7 +129,8 @@ GRAMMAR = r""" COM: "//" POW: "**" - ARROW: "=>" | "<=>" + ARROW: "=>" + BI_ARROW: "<=>" RULES_START: "#! rules" INITS_START: "#! inits" DEFNS_START: "#! definitions" @@ -567,14 +568,20 @@ class TreeToObjects(Transformer): def rule(self, matches): label = None # TODO create implicit label - rate = None + rate1 = None + rate2 = None + if len(matches) == 6: + label, lhs, arrow, rhs, rate1, rate2 = matches if len(matches) == 5: - label, lhs, arrow, rhs, rate = matches + if type(matches[0]) == str: + label, lhs, arrow, rhs, rate1 = matches + else: + lhs, arrow, rhs, rate1, rate2 = matches elif len(matches) == 4: if type(matches[0]) == str: label, lhs, arrow, rhs = matches else: - lhs, arrow, rhs, rate = matches + lhs, arrow, rhs, rate1 = matches else: lhs, arrow, rhs = matches agents = tuple(lhs.seq + rhs.seq) @@ -609,15 +616,15 @@ class TreeToObjects(Transformer): compartments, complexes, pairs, - Rate(rate) if rate else None, + Rate(rate1) if rate1 else None, label, - ) + ), Rate(rate2) if rate2 else None def rules(self, matches): rules = [] - for reversible, rule in matches[1:]: + for reversible, rule, new_rate in matches[1:]: if reversible: - reversible_rule = rule.create_reversible() + reversible_rule = rule.create_reversible(new_rate) rules.append(rule) rules.append(reversible_rule) else: @@ -695,7 +702,8 @@ class Parser: self.terminals.update( { "COM": "//", - "ARROW": "=>, <=>", + "ARROW": "=>", + "BI_ARROW": "<=>", "POW": "**", "DOUBLE_COLON": "::", "RULES_START": "#! rules",
sybila/eBCSgen
52f30b5211bc9d71314595159c46d65ecce2f400
diff --git a/Testing/objects_testing.py b/Testing/objects_testing.py index 5debd74..71742b5 100644 --- a/Testing/objects_testing.py +++ b/Testing/objects_testing.py @@ -20,6 +20,7 @@ state_parser = Parser("state") side_parser = Parser("side") rate_complex_parser = Parser("rate_complex") rule_parser = Parser("rule") +rules_parser = Parser("rules") model_parser = Parser("model") # atomic @@ -246,13 +247,75 @@ rate_3 = Rate("1.0/(1.0+([X()::rep])**4.0)") r3 = Rule(sequence_3, mid_3, compartments_3, complexes_3, pairs_3, rate_3) sequence_4 = (s34, s35, s36, s37) +reversed_sequence_4 = (s36, s37, s34, s35) mid_4 = 2 compartments_4 = ["cyt"] * 4 complexes_4 = [(0, 1), (2, 2), (3, 3)] +reversed_complexes_4 = [(0, 0), (1, 1), (2, 3)] pairs_4 = [(0, 2), (1, 3)] rate_4 = Rate("3.0*[K()::cyt]/2.0*v_1") +reversed_rate_4 = Rate("2.0*[K()::cyt]/3.0*v_1") r4 = Rule(sequence_4, mid_4, compartments_4, complexes_4, pairs_4, rate_4) +reversed_r4a = Rule( + reversed_sequence_4, mid_4, compartments_4, reversed_complexes_4, pairs_4, rate_4 +) +reversed_r4b = Rule( + reversed_sequence_4, + mid_4, + compartments_4, + reversed_complexes_4, + pairs_4, + reversed_rate_4, +) +sequence_one_side_bidirectional = (s36, s37) +mid_one_side_bidirectional_a = 2 +mid_one_side_bidirectional_b = 0 +compartments_one_side_bidirectional = ["cyt"] * 2 +complexes_one_side_bidirectional = [(0, 0), (1, 1)] +pairs_one_side_bidirectional_a = [(0, None), (1, None)] +pairs_one_side_bidirectional_b = [(None, 0), (None, 1)] +one_side_bidirectional_a = Rule( + sequence_one_side_bidirectional, + mid_one_side_bidirectional_a, + compartments_one_side_bidirectional, + complexes_one_side_bidirectional, + pairs_one_side_bidirectional_a, + rate_4, +) +one_side_bidirectional_b = Rule( + sequence_one_side_bidirectional, + mid_one_side_bidirectional_b, + compartments_one_side_bidirectional, + complexes_one_side_bidirectional, + pairs_one_side_bidirectional_b, + rate_4, +) +one_side_bidirectional_b_reversed_rate = Rule( + sequence_one_side_bidirectional, + mid_one_side_bidirectional_b, + compartments_one_side_bidirectional, + complexes_one_side_bidirectional, + pairs_one_side_bidirectional_b, + reversed_rate_4, +) +one_side_bidirectional_a_no_rate = Rule( + sequence_one_side_bidirectional, + mid_one_side_bidirectional_a, + compartments_one_side_bidirectional, + complexes_one_side_bidirectional, + pairs_one_side_bidirectional_a, + None, +) +one_side_bidirectional_b_no_rate = Rule( + sequence_one_side_bidirectional, + mid_one_side_bidirectional_b, + compartments_one_side_bidirectional, + complexes_one_side_bidirectional, + pairs_one_side_bidirectional_b, + None, +) + sequence_5 = (s34, s35, s36, s37, s38) mid_5 = 2 @@ -272,6 +335,9 @@ rate_6 = Rate("3.0*[K(T{3+})::cyt]/2.0*v_1") r6 = Rule(sequence_6, mid_6, compartments_6, complexes_6, pairs_6, rate_6) rule_no_rate = Rule(sequence_4, mid_4, compartments_4, complexes_4, pairs_4, None) +reversed_no_rate = Rule( + reversed_sequence_4, mid_4, compartments_4, reversed_complexes_4, pairs_4, None +) sequence_c1 = (s34, s35, s36, s37, s2) mid_c1 = 2 diff --git a/Testing/parsing/test_rule.py b/Testing/parsing/test_rule.py index a44b056..e3294e4 100644 --- a/Testing/parsing/test_rule.py +++ b/Testing/parsing/test_rule.py @@ -1,10 +1,8 @@ import pytest -from eBCSgen.Core.Rule import Rule -from eBCSgen.Core.Rate import Rate - import Testing.objects_testing as objects + def test_parser(): rule_expr = "K(S{u}).B()::cyt => K(S{p})::cyt + B()::cyt + D(B{_})::cell @ 3*[K()::cyt]/2*v_1" assert 
objects.rule_parser.parse(rule_expr).data[1] == objects.r5 @@ -20,3 +18,55 @@ def test_parser(): rule_expr = "K(S{u}).B()::cyt => K(S{p})::cyt + B()::cyt" assert objects.rule_parser.parse(rule_expr).data[1] == objects.rule_no_rate + + +def test_bidirectional(): + rule_expr = "#! rules\nK(S{u}).B()::cyt <=> K(S{p})::cyt + B()::cyt" + parsed = objects.rules_parser.parse(rule_expr) + assert parsed.success + assert objects.rule_no_rate in parsed.data["rules"] + assert objects.reversed_no_rate in parsed.data["rules"] + + rule_expr = "#! rules\nK(S{u}).B()::cyt <=> K(S{p})::cyt + B()::cyt @ 3*[K()::cyt]/2*v_1 | 3*[K()::cyt]/2*v_1" + parsed = objects.rules_parser.parse(rule_expr) + assert parsed.success + assert objects.r4 in parsed.data["rules"] + assert objects.reversed_r4a in parsed.data["rules"] + + rule_expr = "#! rules\nK(S{u}).B()::cyt <=> K(S{p})::cyt + B()::cyt @ 3*[K()::cyt]/2*v_1 | 2*[K()::cyt]/3*v_1" + parsed = objects.rules_parser.parse(rule_expr) + assert parsed.success + assert objects.r4 in parsed.data["rules"] + assert objects.reversed_r4b in parsed.data["rules"] + + rule_expr = "#! rules\n <=> K(S{p})::cyt + B()::cyt @ 3*[K()::cyt]/2*v_1 | 3*[K()::cyt]/2*v_1" + parsed = objects.rules_parser.parse(rule_expr) + assert parsed.success + assert objects.one_side_bidirectional_a in parsed.data["rules"] + assert objects.one_side_bidirectional_b in parsed.data["rules"] + + rule_expr = "#! rules\n K(S{p})::cyt + B()::cyt <=> @ 3*[K()::cyt]/2*v_1 | 3*[K()::cyt]/2*v_1" + parsed = objects.rules_parser.parse(rule_expr) + assert parsed.success + assert objects.one_side_bidirectional_a in parsed.data["rules"] + assert objects.one_side_bidirectional_b in parsed.data["rules"] + + rule_expr = "#! rules\n K(S{p})::cyt + B()::cyt <=> @ 3*[K()::cyt]/2*v_1 | 2*[K()::cyt]/3*v_1" + parsed = objects.rules_parser.parse(rule_expr) + assert parsed.success + assert objects.one_side_bidirectional_a in parsed.data["rules"] + assert objects.one_side_bidirectional_b_reversed_rate in parsed.data["rules"] + + rule_expr = "#! rules\n K(S{p})::cyt + B()::cyt <=>" + parsed = objects.rules_parser.parse(rule_expr) + assert parsed.success + assert objects.one_side_bidirectional_a_no_rate in parsed.data["rules"] + assert objects.one_side_bidirectional_b_no_rate in parsed.data["rules"] + + rule_expr = ( + "#! rules\nK(S{u}).B()::cyt <=> K(S{p})::cyt + B()::cyt @ 3*[K()::cyt]/2*v_1" + ) + assert not objects.rules_parser.parse(rule_expr).success + + rule_expr = "#! rules\nK(S{u}).B()::cyt => K(S{p})::cyt + B()::cyt @ 3*[K()::cyt]/2*v_1 | 2*[K()::cyt]/3*v_1" + assert not objects.rules_parser.parse(rule_expr).success
Allow bidirectional rules In the current version, bidirectional rules are not supported at all.
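A rough sketch of what bidirectional support amounts to, using a hypothetical `SimpleRule` stand-in rather than eBCSgen's real `Rule` class: a `LHS <=> RHS @ k1 | k2` rule expands into a forward rule plus a backward rule with swapped sides, each carrying its own rate.

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class SimpleRule:            # stand-in for illustration, not eBCSgen's Rule
    lhs: str
    rhs: str
    rate: Optional[str] = None
    label: Optional[str] = None

def expand_bidirectional(lhs, rhs, fw_rate=None, bw_rate=None, label=None):
    forward = SimpleRule(lhs, rhs, fw_rate, label)
    # the backward rule swaps sides and (optionally) carries its own rate
    backward = SimpleRule(rhs, lhs, bw_rate,
                          (label + "_bw") if label else None)
    return [forward, backward]

rules = expand_bidirectional("K(S{u}).B()::cyt", "K(S{p})::cyt + B()::cyt",
                             "3*[K()::cyt]/2*v_1", "2*[K()::cyt]/3*v_1")
for r in rules:
    print(f"{r.lhs} => {r.rhs}" + (f" @ {r.rate}" if r.rate else ""))
```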
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "Testing/parsing/test_rule.py::test_bidirectional" ]
[ "Testing/parsing/test_rule.py::test_parser" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-02-27T11:48:24Z"
mit
symerio__pgeocode-46
diff --git a/README.rst b/README.rst index b1b279c..a3af1ab 100644 --- a/README.rst +++ b/README.rst @@ -103,6 +103,27 @@ Defaults to ``~/pgeocode_data``, it is the directory where data is downloaded for later consumption. It can be changed using the environment variable ``PGEOCODE_DATA_DIR``, i.e. ``export PGEOCODE_DATA_DIR=/tmp/pgeocode_data``. +**Data sources** + +The data sources are provided as a list in the ``pgeocode.DOWNLOAD_URL`` variable. +The default value is, + +.. code:: + + DOWNLOAD_URL = [ + "https://download.geonames.org/export/zip/{country}.zip", + "https://symerio.github.io/postal-codes-data/data/geonames/{country}.txt", + ] + +Data sources are tried from first to last until one works. Here the second link is a mirror +of the first. + +It is also possible to extend this variable with third party data sources, as +long as they follow the same format. See for instance +[postal-codes-data](https://github.com/symerio/postal-codes-data/tree/master/data/geonames) +repository for examples of data files. + + License ------- diff --git a/doc/contributing.rst b/doc/contributing.rst index 914ab51..35f3408 100644 --- a/doc/contributing.rst +++ b/doc/contributing.rst @@ -9,6 +9,10 @@ Testing Unit tests can be run with, +.. code:: + + pip install pytest pytest-httpserver + .. code:: pytest diff --git a/pgeocode.py b/pgeocode.py index 191a37d..fe6a6d9 100644 --- a/pgeocode.py +++ b/pgeocode.py @@ -2,11 +2,13 @@ # # Authors: Roman Yurchak <[email protected]> +import contextlib import os import urllib.request import warnings from io import BytesIO -from typing import Any, Tuple +from typing import Any, Tuple, List +from zipfile import ZipFile import numpy as np import pandas as pd @@ -17,7 +19,13 @@ STORAGE_DIR = os.environ.get( "PGEOCODE_DATA_DIR", os.path.join(os.path.expanduser("~"), "pgeocode_data") ) -DOWNLOAD_URL = "https://download.geonames.org/export/zip/{country}.zip" +# A list of download locations. If the first URL fails, following ones will +# be used. +DOWNLOAD_URL = [ + "https://download.geonames.org/export/zip/{country}.zip", + "https://symerio.github.io/postal-codes-data/data/geonames/{country}.txt", +] + DATA_FIELDS = [ "country_code", @@ -121,11 +129,51 @@ COUNTRIES_VALID = [ ] -def _open_url(url: str) -> Tuple[BytesIO, Any]: - """Download contents for a URL""" [email protected] +def _open_extract_url(url: str, country: str) -> Any: + """Download contents for a URL + + If the file has a .zip extension, open it and extract the country + + Returns the opened file object. + """ with urllib.request.urlopen(url) as res: - reader = BytesIO(res.read()) - return reader, res.headers + with BytesIO(res.read()) as reader: + if url.endswith(".zip"): + with ZipFile(reader) as fh_zip: + with fh_zip.open(country.upper() + ".txt") as fh: + yield fh + else: + yield reader + + [email protected] +def _open_extract_cycle_url(urls: List[str], country: str) -> Any: + """Same as _open_extract_url but cycle through URLs until one works + + We start by opening the first URL in the list, and if fails + move to the next, until one works or the end of list is reached. + """ + if not isinstance(urls, list) or not len(urls): + raise ValueError(f"urls={urls} must be a list with at least one URL") + + err_msg = f"Provided download URLs failed {{err}}: {urls}" + for idx, val in enumerate(urls): + try: + with _open_extract_url(val, country) as fh: + yield fh + # Found a working URL, exit the loop. 
+ break + except urllib.error.HTTPError as err: # type: ignore + if idx == len(urls) - 1: + raise + warnings.warn( + f"Download from {val} failed with: {err}. " + "Trying next URL in DOWNLOAD_URL list.", + UserWarning, + ) + else: + raise ValueError(err_msg) class Nominatim: @@ -168,23 +216,22 @@ class Nominatim: @staticmethod def _get_data(country: str) -> Tuple[str, pd.DataFrame]: """Load the data from disk; otherwise download and save it""" - from zipfile import ZipFile data_path = os.path.join(STORAGE_DIR, country.upper() + ".txt") if os.path.exists(data_path): data = pd.read_csv(data_path, dtype={"postal_code": str}) else: - url = DOWNLOAD_URL.format(country=country) - reader, headers = _open_url(url) - with ZipFile(reader) as fh_zip: - with fh_zip.open(country.upper() + ".txt") as fh: - data = pd.read_csv( - fh, - sep="\t", - header=None, - names=DATA_FIELDS, - dtype={"postal_code": str}, - ) + download_urls = [ + val.format(country=country) for val in DOWNLOAD_URL + ] + with _open_extract_cycle_url(download_urls, country) as fh: + data = pd.read_csv( + fh, + sep="\t", + header=None, + names=DATA_FIELDS, + dtype={"postal_code": str}, + ) if not os.path.exists(STORAGE_DIR): os.mkdir(STORAGE_DIR) data.to_csv(data_path, index=None)
symerio/pgeocode
e4aeceb647e28abe9ab21edaf943993385f6cb82
diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml index c52c3b3..d7259f8 100644 --- a/.github/workflows/python-tests.yml +++ b/.github/workflows/python-tests.yml @@ -38,5 +38,5 @@ jobs: pip install flake8 - name: Test with pytest run: | - pip install pytest + pip install pytest pytest-httpserver pytest diff --git a/test_pgeocode.py b/test_pgeocode.py index d8c966a..fe64c6a 100644 --- a/test_pgeocode.py +++ b/test_pgeocode.py @@ -2,8 +2,10 @@ # # Authors: Roman Yurchak <[email protected]> import os -import shutil -import tempfile +import urllib +import json +from zipfile import ZipFile +from io import BytesIO import numpy as np import pandas as pd @@ -12,16 +14,13 @@ from numpy.testing import assert_allclose, assert_array_equal import pgeocode from pgeocode import GeoDistance, Nominatim, haversine_distance +from pgeocode import _open_extract_url @pytest.fixture -def temp_dir(): - path_save = pgeocode.STORAGE_DIR - path = tempfile.mkdtemp() - pgeocode.STORAGE_DIR = path - yield path - pgeocode.STORAGE_DIR = path_save - shutil.rmtree(path) +def temp_dir(tmpdir, monkeypatch): + monkeypatch.setattr(pgeocode, "STORAGE_DIR", str(tmpdir)) + yield str(tmpdir) def _normalize_str(x): @@ -179,3 +178,82 @@ def test_haversine_distance(): d_pred = haversine_distance(x, y) # same distance +/- 3 km assert_allclose(d_ref, d_pred, atol=3) + + +def test_open_extract_url(httpserver): + download_url = "/fr.txt" + + # check download of uncompressed files + httpserver.expect_oneshot_request(download_url).respond_with_json({"a": 1}) + with _open_extract_url(httpserver.url_for(download_url), "fr") as fh: + assert json.loads(fh.read()) == {"a": 1} + httpserver.check_assertions() + + # check download of zipped files + # Create an in-memory zip file + answer = b"a=1" + with BytesIO() as fh: + with ZipFile(fh, "w") as fh_zip: + with fh_zip.open("FR.txt", "w") as fh_inner: + fh_inner.write(answer) + fh.seek(0) + res = fh.read() + + download_url = "/fr.zip" + httpserver.expect_oneshot_request(download_url).respond_with_data(res) + + with _open_extract_url(httpserver.url_for(download_url), "fr") as fh: + assert fh.read() == answer + + [email protected]( + "download_url", + [ + "https://download.geonames.org/export/zip/{country}.zip", + "https://symerio.github.io/postal-codes-data/data/" + "geonames/{country}.txt", + ], + ids=["geonames", "gitlab-pages"], +) +def test_cdn(temp_dir, monkeypatch, download_url): + monkeypatch.setattr(pgeocode, "DOWNLOAD_URL", [download_url]) + assert not os.path.exists(os.path.join(temp_dir, "IE.txt")) + Nominatim("IE") + # the data file was downloaded + assert os.path.exists(os.path.join(temp_dir, "IE.txt")) + + +def test_url_returns_404(httpserver, monkeypatch, temp_dir): + download_url = "/fr.gzip" + httpserver.expect_oneshot_request(download_url).respond_with_data( + "", status=404 + ) + + monkeypatch.setattr( + pgeocode, "DOWNLOAD_URL", [httpserver.url_for(download_url)] + ) + # Nominatim("fr") + with pytest.raises(urllib.error.HTTPError, match="HTTP Error 404"): + Nominatim("fr") + httpserver.check_assertions() + + +def test_first_url_fails(httpserver, monkeypatch, temp_dir): + download_url = "/IE.txt" + httpserver.expect_oneshot_request(download_url).respond_with_data( + "", status=404 + ) + + monkeypatch.setattr( + pgeocode, + "DOWNLOAD_URL", + [ + httpserver.url_for(download_url), + "https://symerio.github.io/postal-codes-data/data/" + "geonames/{country}.txt", + ], + ) + msg = "IE.txt failed with: HTTP Error 404.*Trying next URL" + with 
pytest.warns(UserWarning, match=msg): + Nominatim("ie") + httpserver.check_assertions()
Support alternate download locations It might be useful to support alternate download locations in case the GeoNames website goes down. This would also help reproducibility (I'm not sure how often the GeoNames database is updated and whether that is tracked somewhere). This would require storing the data somewhere. One possibility for free hosting could be to attach it to GitHub releases. For instance, maybe @zaro's implementation in https://github.com/zaro/pgeocode/commit/6a3c743bee8fd67ae6ec82c87e0d6cbfefa62110 could be adapted.
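A minimal sketch of the fallback idea, assuming a list of mirror URLs (the second entry below is a placeholder, not a real mirror): try each location in order and fall through to the next on an HTTP error.

```python
import urllib.error
import urllib.request
import warnings

MIRRORS = [
    "https://download.geonames.org/export/zip/{country}.zip",
    "https://example.org/mirror/geonames/{country}.txt",  # placeholder mirror
]

def open_first_working(country: str):
    """Return the response from the first mirror that answers successfully."""
    last_err = None
    for url in (u.format(country=country) for u in MIRRORS):
        try:
            return urllib.request.urlopen(url)
        except urllib.error.HTTPError as err:
            warnings.warn(f"Download from {url} failed ({err}); trying next mirror.")
            last_err = err
    raise last_err  # every mirror failed; surface the last error
```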
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test_pgeocode.py::test_countries[FR-91120-Palaiseau-67000-Strasbourg-400]", "test_pgeocode.py::test_countries[GB-WC2N", "test_pgeocode.py::test_countries[AU-6837-Perth-3000-melbourne-2722]", "test_pgeocode.py::test_countries[AU-6837-Perth-0221-Barton-3089]", "test_pgeocode.py::test_countries[US-60605-Chicago-94103-San", "test_pgeocode.py::test_countries[CA-M5R", "test_pgeocode.py::test_download_dataset", "test_pgeocode.py::test_nominatim_query_postal_code", "test_pgeocode.py::test_nominatim_query_postal_code_multiple", "test_pgeocode.py::test_nominatim_distance_postal_code", "test_pgeocode.py::test_cdn[geonames]", "test_pgeocode.py::test_cdn[gitlab-pages]" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2020-10-23T22:01:54Z"
bsd-3-clause
symerio__pgeocode-62
diff --git a/CHANGELOG.md b/CHANGELOG.md index 683a3c0..088341d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,12 @@ - The minimum supported Python version is updated to Python 3.8 [#65](https://github.com/symerio/pgeocode/pull/65) + - Fix error in latitude grouping when creating a unique postcode index. + With this fix `Nominatim(.., unique=True)` correctly computes the average + latitude for each postcode (if multiple localities share the same postcode), + instead of taking the first latitude value. + [#62](https://github.com/symerio/pgeocode/pull/62) + - The default folder to store downloaded data is changed to `~/.cache/pgeocode/`. This default can still be changed by setting the `PGEOCODE_DATA_DIR` environment variable. [#51](https://github.com/symerio/pgeocode/pull/51) diff --git a/pgeocode.py b/pgeocode.py index 65f8ffd..4f9aab3 100644 --- a/pgeocode.py +++ b/pgeocode.py @@ -252,7 +252,7 @@ class Nominatim: df_unique_cp_group = self._data.groupby("postal_code") data_unique = df_unique_cp_group[["latitude", "longitude"]].mean() valid_keys = set(DATA_FIELDS).difference( - ["place_name", "lattitude", "longitude", "postal_code"] + ["place_name", "latitude", "longitude", "postal_code"] ) data_unique["place_name"] = df_unique_cp_group["place_name"].apply( lambda x: ", ".join([str(el) for el in x])
symerio/pgeocode
fda231859ae17c7282a9d90c0e2b5b3cde1eb01d
diff --git a/test_pgeocode.py b/test_pgeocode.py index b6fe453..1bfdcbf 100644 --- a/test_pgeocode.py +++ b/test_pgeocode.py @@ -261,3 +261,60 @@ def test_first_url_fails(httpserver, monkeypatch, temp_dir): with pytest.warns(UserWarning, match=msg): Nominatim("ie") httpserver.check_assertions() + + +def test_unique_index_pcode(tmp_path): + """Check that a centroid is computed both for latitude and longitude + + Regression test for https://github.com/symerio/pgeocode/pull/62 + """ + + class MockNominatim(Nominatim): + def __init__(self): + pass + + data = pd.DataFrame( + { + "postal_code": ["1", "1", "2", "2"], + "latitude": [1.0, 2.0, 3.0, 4], + "longitude": [5.0, 6.0, 7.0, 8], + "place_name": ["a", "b", "c", "d"], + "state_name": ["a", "b", "c", "d"], + "country_name": ["a", "b", "c", "d"], + "county_name": ["a", "b", "c", "d"], + "community_name": ["a", "b", "c", "d"], + "accuracy": [1, 2, 3, 4], + "country_code": [1, 2, 3, 4], + "county_code": [1, 2, 3, 4], + "state_code": [1, 2, 3, 4], + "community_code": [1, 2, 3, 4], + } + ) + + nominatim = MockNominatim() + data_path = tmp_path / "a.txt" + nominatim._data_path = str(data_path) + nominatim._data = data + data_unique = nominatim._index_postal_codes() + + data_unique_expected = pd.DataFrame( + { + "postal_code": ["1", "2"], + "latitude": [1.5, 3.5], + "longitude": [5.5, 7.5], + "place_name": ["a, b", "c, d"], + "state_name": ["a", "c"], + # We don't include the country_name for some reason? + # 'country_name': ['a', 'c'], + "county_name": ["a", "c"], + "community_name": ["a", "c"], + "accuracy": [1, 3], + "country_code": [1, 3], + "county_code": [1, 3], + "state_code": [1, 3], + "community_code": [1, 3], + } + ) + pd.testing.assert_frame_equal( + data_unique.sort_index(axis=1), data_unique_expected.sort_index(axis=1) + )
incorrect centroid in query_postal_code for duplicate postal code entries query_postal_code sums the longitude. nomi.query_postal_code("41-800") That's from the GeoNames file: 41-800 will return 2 locations: PL, 41-800, 50.2817, 18.6745 PL, 41-800, 50.3055, 18.778 After running: nomi.query_postal_code("41-800") postal_code 41-800 place_name Gliwice, Zabrze latitude 50.2817 longitude 18.7263 and the longitude = SUM of the longitudes from the file / number of results.
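To make the expected behaviour concrete, a small pandas sketch over a synthetic frame (not pgeocode's real loader) built from the two rows above; a groupby mean yields the centroid for both coordinates:

```python
import pandas as pd

# The two GeoNames rows for postal code 41-800, as in the report above.
data = pd.DataFrame({
    "postal_code": ["41-800", "41-800"],
    "place_name": ["Gliwice", "Zabrze"],
    "latitude": [50.2817, 50.3055],
    "longitude": [18.6745, 18.778],
})

# Averaging BOTH columns per postal code gives the centroid:
centroids = data.groupby("postal_code")[["latitude", "longitude"]].mean()
print(centroids)  # latitude 50.2936, longitude 18.72625 for 41-800
```

Note that the reported output has the averaged longitude (18.7263) but the first row's latitude (50.2817), i.e. only one coordinate was being averaged.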
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test_pgeocode.py::test_unique_index_pcode" ]
[ "test_pgeocode.py::test_countries[FR-91120-Palaiseau-67000-Strasbourg-400]", "test_pgeocode.py::test_countries[GB-WC2N", "test_pgeocode.py::test_countries[AU-6837-Perth-3000-melbourne-2722]", "test_pgeocode.py::test_countries[AU-6837-Perth-0221-Barton-3089]", "test_pgeocode.py::test_countries[US-60605-Chicago-94103-San", "test_pgeocode.py::test_countries[CA-M5R", "test_pgeocode.py::test_download_dataset", "test_pgeocode.py::test_nominatim_query_postal_code", "test_pgeocode.py::test_nominatim_query_postal_code_multiple", "test_pgeocode.py::test_nominatim_distance_postal_code", "test_pgeocode.py::test_cdn[geonames]", "test_pgeocode.py::test_cdn[gitlab-pages]" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2022-06-16T08:54:33Z"
bsd-3-clause
syrusakbary__snapshottest-133
diff --git a/.travis.yml b/.travis.yml index 9494339..578c891 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,8 +1,6 @@ language: python sudo: false python: -- 2.7 -- 3.4 - 3.5 - 3.6 - 3.7 diff --git a/setup.py b/setup.py index 1229d68..a18b9b0 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ from setuptools import setup, find_packages with open("README.md") as f: readme = f.read() -tests_require = ["six", "pytest>=4.6", "pytest-cov", "nose", "django>=1.10.6"] +tests_require = ["pytest>=4.6", "pytest-cov", "nose", "django>=1.10.6"] setup( name="snapshottest", @@ -23,7 +23,7 @@ setup( ], "nose.plugins.0.10": ["snapshottest = snapshottest.nose:SnapshotTestPlugin"], }, - install_requires=["six>=1.10.0", "termcolor", "fastdiff>=0.1.4,<1"], + install_requires=["termcolor", "fastdiff>=0.1.4,<1"], tests_require=tests_require, extras_require={ "test": tests_require, @@ -34,21 +34,16 @@ setup( "nose", ], }, + requires_python=">=3.5", classifiers=[ "Development Status :: 5 - Production/Stable", + "Framework :: Django", "Framework :: Pytest", "Intended Audience :: Developers", "Operating System :: OS Independent", - "Programming Language :: Python", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.4", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", "Topic :: Software Development :: Libraries", + "Topic :: Software Development :: Testing", + "Topic :: Software Development :: Testing :: Unit", ], license="MIT", packages=find_packages(exclude=("tests",)),
syrusakbary/snapshottest
9818a7678b3998fcc67634fc86a427d68692c091
diff --git a/snapshottest/django.py b/snapshottest/django.py index 298fd5f..9d20b9c 100644 --- a/snapshottest/django.py +++ b/snapshottest/django.py @@ -1,4 +1,3 @@ -from __future__ import absolute_import from django.test import TestCase as dTestCase from django.test import SimpleTestCase as dSimpleTestCase from django.test.runner import DiscoverRunner diff --git a/snapshottest/error.py b/snapshottest/error.py index 5cd1fd7..da0ff8a 100644 --- a/snapshottest/error.py +++ b/snapshottest/error.py @@ -1,6 +1,3 @@ -from __future__ import unicode_literals - - class SnapshotError(Exception): pass diff --git a/snapshottest/formatters.py b/snapshottest/formatters.py index 089209f..39a0644 100644 --- a/snapshottest/formatters.py +++ b/snapshottest/formatters.py @@ -1,5 +1,4 @@ import math -import six from collections import defaultdict from .sorted_dict import SortedDict @@ -168,7 +167,7 @@ def default_formatters(): CollectionFormatter(list, format_list), CollectionFormatter(set, format_set), CollectionFormatter(frozenset, format_frozenset), - TypeFormatter(six.string_types, format_str), + TypeFormatter((str,), format_str), TypeFormatter((float,), format_float), TypeFormatter((int, complex, bool, bytes), format_std_type), GenericFormatter(), diff --git a/snapshottest/nose.py b/snapshottest/nose.py index 371734d..9d0e6b4 100644 --- a/snapshottest/nose.py +++ b/snapshottest/nose.py @@ -1,4 +1,3 @@ -from __future__ import absolute_import import logging import os diff --git a/snapshottest/pytest.py b/snapshottest/pytest.py index 2d40ca6..5b28898 100644 --- a/snapshottest/pytest.py +++ b/snapshottest/pytest.py @@ -1,4 +1,3 @@ -from __future__ import absolute_import import pytest import re diff --git a/snapshottest/unittest.py b/snapshottest/unittest.py index b68fce7..535b24a 100644 --- a/snapshottest/unittest.py +++ b/snapshottest/unittest.py @@ -1,4 +1,3 @@ -from __future__ import absolute_import import unittest import inspect diff --git a/tests/test_formatter.py b/tests/test_formatter.py index 8c53056..2b43f0a 100644 --- a/tests/test_formatter.py +++ b/tests/test_formatter.py @@ -1,14 +1,10 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals - import pytest -import six from math import isnan from snapshottest.formatter import Formatter -if not six.PY2: - import unittest.mock +import unittest.mock @pytest.mark.parametrize( @@ -33,45 +29,33 @@ def test_text_formatting(text_value, expected): formatted = formatter(text_value) assert formatted == expected - if six.PY2: - # Also check that Python 2 str value formats the same as the unicode value. - # (If a test case raises UnicodeEncodeError in here, it should be moved to - # the non_ascii verson of this test, below.) - py2_str_value = text_value.encode("ASCII") - py2_str_formatted = formatter(py2_str_value) - assert py2_str_formatted == expected - -# When unicode snapshots are saved in Python 2, there's no easy way to generate -# a clean unicode_literals repr that doesn't use escape sequences. But the -# resulting snapshots are still valid on Python 3 (and vice versa). 
@pytest.mark.parametrize( - "text_value, expected_py3, expected_py2", + "text_value, expected", [ - ("encodage précis", "'encodage précis'", "'encodage pr\\xe9cis'"), - ("精确的编码", "'精确的编码'", "'\\u7cbe\\u786e\\u7684\\u7f16\\u7801'"), + ("encodage précis", "'encodage précis'"), + ("精确的编码", "'精确的编码'"), # backslash [unicode repr can't just be `"u'{}'".format(value)`] - ("omvänt\\snedstreck", "'omvänt\\\\snedstreck'", "'omv\\xe4nt\\\\snedstreck'"), + ("omvänt\\snedstreck", "'omvänt\\\\snedstreck'"), # multiline - ("ett\ntvå\n", "'''ett\ntvå\n'''", "'''ett\ntv\\xe5\n'''"), + ("ett\ntvå\n", "'''ett\ntvå\n'''"), ], ) -def test_non_ascii_text_formatting(text_value, expected_py3, expected_py2): - expected = expected_py2 if six.PY2 else expected_py3 +def test_non_ascii_text_formatting(text_value, expected): formatter = Formatter() formatted = formatter(text_value) assert formatted == expected -if not six.PY2: - # https://github.com/syrusakbary/snapshottest/issues/115 - def test_can_normalize_unittest_mock_call_object(): - formatter = Formatter() - print(formatter.normalize(unittest.mock.call(1, 2, 3))) +# https://github.com/syrusakbary/snapshottest/issues/115 +def test_can_normalize_unittest_mock_call_object(): + formatter = Formatter() + print(formatter.normalize(unittest.mock.call(1, 2, 3))) + - def test_can_normalize_iterator_objects(): - formatter = Formatter() - print(formatter.normalize(x for x in range(3))) +def test_can_normalize_iterator_objects(): + formatter = Formatter() + print(formatter.normalize(x for x in range(3))) @pytest.mark.parametrize( diff --git a/tests/test_module.py b/tests/test_module.py index cef2207..5ad2758 100644 --- a/tests/test_module.py +++ b/tests/test_module.py @@ -1,5 +1,3 @@ -from __future__ import unicode_literals - import pytest from snapshottest import Snapshot diff --git a/tests/test_snapshot_test.py b/tests/test_snapshot_test.py index 9249478..9084f87 100644 --- a/tests/test_snapshot_test.py +++ b/tests/test_snapshot_test.py @@ -1,5 +1,3 @@ -from __future__ import unicode_literals - import pytest from collections import OrderedDict diff --git a/tests/test_sorted_dict.py b/tests/test_sorted_dict.py index b8217d8..41ff194 100644 --- a/tests/test_sorted_dict.py +++ b/tests/test_sorted_dict.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals import enum import pytest
Require Python 3 Since Python 2 has been deprecated, I suggest we drop support for it in the next major release. Many libraries, including developer tools, have done so… pip, pytest, etc. IMO it is not worth spending the limited volunteer development effort we have to keep this working on an obsolete platform.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_snapshot_test.py::test_snapshot_matches_itself[{'a'," ]
[ "tests/test_formatter.py::test_text_formatting[abc-'abc']", "tests/test_formatter.py::test_text_formatting[-'']", "tests/test_formatter.py::test_text_formatting[back\\\\slash-'back\\\\\\\\slash']", "tests/test_formatter.py::test_text_formatting[it", "tests/test_formatter.py::test_text_formatting[it's", "tests/test_formatter.py::test_text_formatting[one\\ntwo\\n-'''one\\ntwo\\n''']", "tests/test_formatter.py::test_text_formatting[three\\n'''quotes-\"\"\"three\\n'''quotes\"\"\"]", "tests/test_formatter.py::test_text_formatting[so", "tests/test_formatter.py::test_non_ascii_text_formatting[encodage", "tests/test_formatter.py::test_non_ascii_text_formatting[\\u7cbe\\u786e\\u7684\\u7f16\\u7801-'\\u7cbe\\u786e\\u7684\\u7f16\\u7801']", "tests/test_formatter.py::test_non_ascii_text_formatting[omv\\xe4nt\\\\snedstreck-'omv\\xe4nt\\\\\\\\snedstreck']", "tests/test_formatter.py::test_non_ascii_text_formatting[ett\\ntv\\xe5\\n-'''ett\\ntv\\xe5\\n''']", "tests/test_formatter.py::test_can_normalize_unittest_mock_call_object", "tests/test_formatter.py::test_can_normalize_iterator_objects", "tests/test_formatter.py::test_basic_formatting_parsing[0]", "tests/test_formatter.py::test_basic_formatting_parsing[12.7]", "tests/test_formatter.py::test_basic_formatting_parsing[True]", "tests/test_formatter.py::test_basic_formatting_parsing[False]", "tests/test_formatter.py::test_basic_formatting_parsing[None]", "tests/test_formatter.py::test_basic_formatting_parsing[-inf]", "tests/test_formatter.py::test_basic_formatting_parsing[inf]", "tests/test_formatter.py::test_formatting_parsing_nan", "tests/test_module.py::TestSnapshotModuleLoading::test_load_not_yet_saved", "tests/test_module.py::TestSnapshotModuleLoading::test_load_missing_package", "tests/test_module.py::TestSnapshotModuleLoading::test_load_corrupted_snapshot", "tests/test_snapshot_test.py::test_snapshot_matches_itself['abc']", "tests/test_snapshot_test.py::test_snapshot_matches_itself[b'abc']", "tests/test_snapshot_test.py::test_snapshot_matches_itself[123]", "tests/test_snapshot_test.py::test_snapshot_matches_itself[123.456]", "tests/test_snapshot_test.py::test_snapshot_matches_itself[{'a':", "tests/test_snapshot_test.py::test_snapshot_matches_itself[['a',", "tests/test_snapshot_test.py::test_snapshot_matches_itself[('a',", "tests/test_snapshot_test.py::test_snapshot_matches_itself[('a',)]", "tests/test_snapshot_test.py::test_snapshot_matches_itself[None]", "tests/test_snapshot_test.py::test_snapshot_matches_itself[False]", "tests/test_snapshot_test.py::test_snapshot_matches_itself['']", "tests/test_snapshot_test.py::test_snapshot_matches_itself[b'']", "tests/test_snapshot_test.py::test_snapshot_matches_itself[{}]", "tests/test_snapshot_test.py::test_snapshot_matches_itself[[]]", "tests/test_snapshot_test.py::test_snapshot_matches_itself[set()]", "tests/test_snapshot_test.py::test_snapshot_matches_itself[()]", "tests/test_snapshot_test.py::test_snapshot_matches_itself[0]", "tests/test_snapshot_test.py::test_snapshot_matches_itself[0.0]", "tests/test_snapshot_test.py::test_snapshot_matches_itself[OrderedDict([('a',", "tests/test_snapshot_test.py::test_snapshot_matches_itself[OrderedDict([('c',", "tests/test_snapshot_test.py::test_snapshot_does_not_match_other_values[snapshot", "tests/test_sorted_dict.py::test_sorted_dict[key1-value]", "tests/test_sorted_dict.py::test_sorted_dict[key2-42]", "tests/test_sorted_dict.py::test_sorted_dict[key3-value2]", "tests/test_sorted_dict.py::test_sorted_dict[key4-value3]", 
"tests/test_sorted_dict.py::test_sorted_dict[key5-value4]", "tests/test_sorted_dict.py::test_sorted_dict[key6-value5]", "tests/test_sorted_dict.py::test_sorted_dict[key7-value6]", "tests/test_sorted_dict.py::test_sorted_dict[key8-value7]", "tests/test_sorted_dict.py::test_sorted_dict_string_key", "tests/test_sorted_dict.py::test_sorted_dict_int_key", "tests/test_sorted_dict.py::test_sorted_dict_intenum", "tests/test_sorted_dict.py::test_sorted_dict_enum", "tests/test_sorted_dict.py::test_sorted_dict_enum_value", "tests/test_sorted_dict.py::test_sorted_dict_enum_key" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-09-29T23:35:10Z"
mit
tableau__document-api-python-15
diff --git a/tableaudocumentapi/datasource.py b/tableaudocumentapi/datasource.py index 93ebe55..617004a 100644 --- a/tableaudocumentapi/datasource.py +++ b/tableaudocumentapi/datasource.py @@ -72,7 +72,7 @@ class Datasource(object): """ # save the file - self._datasourceTree.write(self._filename) + self._datasourceTree.write(self._filename, encoding="utf-8", xml_declaration=True) def save_as(self, new_filename): """ @@ -85,7 +85,7 @@ class Datasource(object): Nothing. """ - self._datasourceTree.write(new_filename) + self._datasourceTree.write(new_filename, encoding="utf-8", xml_declaration=True) ########### # name diff --git a/tableaudocumentapi/workbook.py b/tableaudocumentapi/workbook.py index 67dbc32..889f746 100644 --- a/tableaudocumentapi/workbook.py +++ b/tableaudocumentapi/workbook.py @@ -76,7 +76,7 @@ class Workbook(object): """ # save the file - self._workbookTree.write(self._filename) + self._workbookTree.write(self._filename, encoding="utf-8", xml_declaration=True) def save_as(self, new_filename): """ @@ -90,7 +90,7 @@ class Workbook(object): """ - self._workbookTree.write(new_filename) + self._workbookTree.write(new_filename, encoding="utf-8", xml_declaration=True) ########################################################################### #
tableau/document-api-python
07aad9550d3d36a4d74c4751832c50fe81882a01
diff --git a/test.py b/test.py index fd7d1bd..5606005 100644 --- a/test.py +++ b/test.py @@ -17,6 +17,7 @@ TABLEAU_10_WORKBOOK = '''<?xml version='1.0' encoding='utf-8' ?><workbook source TABLEAU_CONNECTION_XML = ET.fromstring( '''<connection authentication='sspi' class='sqlserver' dbname='TestV1' odbc-native-protocol='yes' one-time-sql='' server='mssql2012.test.tsi.lan' username=''></connection>''') + class HelperMethodTests(unittest.TestCase): def test_is_valid_file_with_valid_inputs(self): @@ -39,7 +40,6 @@ class ConnectionParserTests(unittest.TestCase): self.assertIsInstance(connections[0], Connection) self.assertEqual(connections[0].dbname, 'TestV1') - def test_can_extract_federated_connections(self): parser = ConnectionParser(ET.fromstring(TABLEAU_10_TDS), '10.0') connections = parser.get_connections() @@ -97,6 +97,17 @@ class DatasourceModelTests(unittest.TestCase): new_tds = Datasource.from_file(self.tds_file.name) self.assertEqual(new_tds.connections[0].dbname, 'newdb.test.tsi.lan') + def test_save_has_xml_declaration(self): + original_tds = Datasource.from_file(self.tds_file.name) + original_tds.connections[0].dbname = 'newdb.test.tsi.lan' + + original_tds.save() + + with open(self.tds_file.name) as f: + first_line = f.readline().strip() # first line should be xml tag + self.assertEqual( + first_line, "<?xml version='1.0' encoding='utf-8'?>") + class WorkbookModelTests(unittest.TestCase): @@ -122,7 +133,8 @@ class WorkbookModelTests(unittest.TestCase): original_wb.save() new_wb = Workbook(self.workbook_file.name) - self.assertEqual(new_wb.datasources[0].connections[0].dbname, 'newdb.test.tsi.lan') + self.assertEqual(new_wb.datasources[0].connections[ + 0].dbname, 'newdb.test.tsi.lan') class WorkbookModelV10Tests(unittest.TestCase): @@ -152,7 +164,19 @@ class WorkbookModelV10Tests(unittest.TestCase): original_wb.save() new_wb = Workbook(self.workbook_file.name) - self.assertEqual(new_wb.datasources[0].connections[0].dbname, 'newdb.test.tsi.lan') + self.assertEqual(new_wb.datasources[0].connections[ + 0].dbname, 'newdb.test.tsi.lan') + + def test_save_has_xml_declaration(self): + original_wb = Workbook(self.workbook_file.name) + original_wb.datasources[0].connections[0].dbname = 'newdb.test.tsi.lan' + + original_wb.save() + + with open(self.workbook_file.name) as f: + first_line = f.readline().strip() # first line should be xml tag + self.assertEqual( + first_line, "<?xml version='1.0' encoding='utf-8'?>") if __name__ == '__main__': unittest.main()
Tabcmd publish with .twb created via Document API I can successfully create a .twb file via the Document API, but attempting to publish it to my Tableau Server via Tabcmd results in an unexpected error: **Bad request unexpected error occurred opening the packaged workbook.** Attached is the template workbook created in Tableau Desktop (superstore_sales.twb) and one of the workbooks created from that template via the Document API (superstore_sales_arizona.twb) [superstore_twbs.zip](https://github.com/tableau/document-api-python/files/285303/superstore_twbs.zip)
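The likely mechanism here is standard-library behaviour: `xml.etree.ElementTree.write()` omits the `<?xml ...?>` declaration unless asked for it, so a saved file can start without the header some consumers expect. A minimal sketch (file names are illustrative):

```python
import xml.etree.ElementTree as ET

root = ET.Element("workbook")
tree = ET.ElementTree(root)

# Default write: no XML declaration is emitted.
tree.write("no_header.twb")
# Requesting the declaration explicitly prepends the <?xml ...?> header.
tree.write("with_header.twb", encoding="utf-8", xml_declaration=True)

print(open("no_header.twb").readline())    # <workbook />
print(open("with_header.twb").readline())  # <?xml version='1.0' encoding='utf-8'?>
```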
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test.py::DatasourceModelTests::test_save_has_xml_declaration", "test.py::WorkbookModelV10Tests::test_save_has_xml_declaration" ]
[ "test.py::HelperMethodTests::test_is_valid_file_with_invalid_inputs", "test.py::HelperMethodTests::test_is_valid_file_with_valid_inputs", "test.py::ConnectionParserTests::test_can_extract_federated_connections", "test.py::ConnectionParserTests::test_can_extract_legacy_connection", "test.py::ConnectionModelTests::test_can_read_attributes_from_connection", "test.py::ConnectionModelTests::test_can_write_attributes_to_connection", "test.py::DatasourceModelTests::test_can_extract_connection", "test.py::DatasourceModelTests::test_can_extract_datasource_from_file", "test.py::DatasourceModelTests::test_can_save_tds", "test.py::WorkbookModelTests::test_can_extract_datasource", "test.py::WorkbookModelTests::test_can_update_datasource_connection_and_save", "test.py::WorkbookModelV10Tests::test_can_extract_datasourceV10", "test.py::WorkbookModelV10Tests::test_can_update_datasource_connection_and_saveV10" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2016-06-02T00:21:16Z"
mit
tableau__server-client-python-109
diff --git a/tableauserverclient/models/user_item.py b/tableauserverclient/models/user_item.py index 1e4f54a..2df6764 100644 --- a/tableauserverclient/models/user_item.py +++ b/tableauserverclient/models/user_item.py @@ -119,7 +119,7 @@ class UserItem(object): @classmethod def from_response(cls, resp): - all_user_items = set() + all_user_items = [] parsed_response = ET.fromstring(resp) all_user_xml = parsed_response.findall('.//t:user', namespaces=NAMESPACE) for user_xml in all_user_xml: @@ -128,7 +128,7 @@ class UserItem(object): user_item = cls(name, site_role) user_item._set_values(id, name, site_role, last_login, external_auth_user_id, fullname, email, auth_setting, domain_name) - all_user_items.add(user_item) + all_user_items.append(user_item) return all_user_items @staticmethod
tableau/server-client-python
e853d7c79f54f232c9f1da07f6c085db399e598a
diff --git a/test/test_user.py b/test/test_user.py index 556cd62..fa83443 100644 --- a/test/test_user.py +++ b/test/test_user.py @@ -54,7 +54,7 @@ class UserTests(unittest.TestCase): all_users, pagination_item = self.server.users.get() self.assertEqual(0, pagination_item.total_available) - self.assertEqual(set(), all_users) + self.assertEqual([], all_users) def test_get_before_signin(self): self.server._auth_token = None
Pager with users throws TypeError I am trying to extract the list of users using the Pager: `print(*TSC.Pager(tableau.users))` I get the following error: ` File "metalab_users.py", line 74, in <module> print(*tableau_users) File "C:\Program Files\Python35\lib\site-packages\tableauserverclient\server\pager.py", line 30, in __iter__ yield current_item_list.pop(0) TypeError: pop() takes no arguments (1 given)` When calling projects with the same code, I get no such error: `print(*TSC.Pager(tableau.projects))`
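The error pattern in miniature, using plain built-ins independent of TSC: `list.pop(0)` is valid, but `set.pop()` accepts no index, so a pager that pops items by position only works when the endpoint returns a list.

```python
users_as_list = ["alice", "bob"]
users_as_set = {"alice", "bob"}

print(users_as_list.pop(0))   # 'alice': popping by index works on a list

try:
    users_as_set.pop(0)       # sets are unordered; pop() takes no argument
except TypeError as err:
    print(err)                # e.g. "pop() takes no arguments (1 given)"
```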
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_user.py::UserTests::test_get_empty" ]
[ "test/test_user.py::UserTests::test_update_missing_id", "test/test_user.py::UserTests::test_get_by_id_missing_id", "test/test_user.py::UserTests::test_get_before_signin", "test/test_user.py::UserTests::test_update", "test/test_user.py::UserTests::test_add", "test/test_user.py::UserTests::test_populate_workbooks_missing_id", "test/test_user.py::UserTests::test_get_by_id", "test/test_user.py::UserTests::test_remove", "test/test_user.py::UserTests::test_get", "test/test_user.py::UserTests::test_populate_workbooks", "test/test_user.py::UserTests::test_remove_missing_id" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2016-12-02T04:38:26Z"
mit
tableau__server-client-python-1117
diff --git a/samples/create_group.py b/samples/create_group.py index 50d84a1..d5cf712 100644 --- a/samples/create_group.py +++ b/samples/create_group.py @@ -46,7 +46,7 @@ def main(): logging.basicConfig(level=logging_level) tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site) - server = TSC.Server(args.server, use_server_version=True) + server = TSC.Server(args.server, use_server_version=True, http_options={"verify": False}) with server.auth.sign_in(tableau_auth): # this code shows 3 different error codes that mean "resource is already in collection" # 409009: group already exists on server diff --git a/tableauserverclient/server/endpoint/endpoint.py b/tableauserverclient/server/endpoint/endpoint.py index 378c847..a7b3306 100644 --- a/tableauserverclient/server/endpoint/endpoint.py +++ b/tableauserverclient/server/endpoint/endpoint.py @@ -11,9 +11,12 @@ from .exceptions import ( NonXMLResponseError, EndpointUnavailableError, ) -from .. import endpoint from ..query import QuerySet from ... import helpers +from ..._version import get_versions + +__TSC_VERSION__ = get_versions()["version"] +del get_versions logger = logging.getLogger("tableau.endpoint") @@ -22,34 +25,25 @@ Success_codes = [200, 201, 202, 204] XML_CONTENT_TYPE = "text/xml" JSON_CONTENT_TYPE = "application/json" +USERAGENT_HEADER = "User-Agent" + if TYPE_CHECKING: from ..server import Server from requests import Response -_version_header: Optional[str] = None - - class Endpoint(object): def __init__(self, parent_srv: "Server"): - global _version_header self.parent_srv = parent_srv @staticmethod def _make_common_headers(auth_token, content_type): - global _version_header - - if not _version_header: - from ..server import __TSC_VERSION__ - - _version_header = __TSC_VERSION__ - headers = {} if auth_token is not None: headers["x-tableau-auth"] = auth_token if content_type is not None: headers["content-type"] = content_type - headers["User-Agent"] = "Tableau Server Client/{}".format(_version_header) + headers["User-Agent"] = "Tableau Server Client/{}".format(__TSC_VERSION__) return headers def _make_request( @@ -62,9 +56,9 @@ class Endpoint(object): parameters: Optional[Dict[str, Any]] = None, ) -> "Response": parameters = parameters or {} - parameters.update(self.parent_srv.http_options) if "headers" not in parameters: parameters["headers"] = {} + parameters.update(self.parent_srv.http_options) parameters["headers"].update(Endpoint._make_common_headers(auth_token, content_type)) if content is not None: diff --git a/tableauserverclient/server/server.py b/tableauserverclient/server/server.py index c82f4a6..18f5834 100644 --- a/tableauserverclient/server/server.py +++ b/tableauserverclient/server/server.py @@ -37,11 +37,6 @@ from .exceptions import NotSignedInError from ..namespace import Namespace -from .._version import get_versions - -__TSC_VERSION__ = get_versions()["version"] -del get_versions - _PRODUCT_TO_REST_VERSION = { "10.0": "2.3", "9.3": "2.2", @@ -51,7 +46,6 @@ _PRODUCT_TO_REST_VERSION = { } minimum_supported_server_version = "2.3" default_server_version = "2.3" -client_version_header = "X-TableauServerClient-Version" class Server(object): @@ -98,23 +92,29 @@ class Server(object): # must set this before calling use_server_version, because that's a server call if http_options: self.add_http_options(http_options) - self.add_http_version_header() if use_server_version: self.use_server_version() - def add_http_options(self, options_dict): - self._http_options.update(options_dict) - 
if options_dict.get("verify") == False: + def add_http_options(self, option_pair: dict): + if not option_pair: + # log debug message + return + if len(option_pair) != 1: + raise ValueError( + "Update headers one at a time. Expected type: ", + {"key": 12}.__class__, + "Actual type: ", + option_pair, + option_pair.__class__, + ) + self._http_options.update(option_pair) + if "verify" in option_pair.keys() and self._http_options.get("verify") is False: urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) - - def add_http_version_header(self): - if not self._http_options[client_version_header]: - self._http_options.update({client_version_header: __TSC_VERSION__}) + # would be nice if you could turn them back on def clear_http_options(self): self._http_options = dict() - self.add_http_version_header() def _clear_auth(self): self._site_id = None
tableau/server-client-python
f653e15b582ae2ea7fc76f423f178d430e2a30ed
diff --git a/test/http/test_http_requests.py b/test/http/test_http_requests.py new file mode 100644 index 0000000..5759b1c --- /dev/null +++ b/test/http/test_http_requests.py @@ -0,0 +1,56 @@ +import tableauserverclient as TSC +import unittest +from requests.exceptions import MissingSchema + + +class ServerTests(unittest.TestCase): + def test_init_server_model_empty_throws(self): + with self.assertRaises(TypeError): + server = TSC.Server() + + def test_init_server_model_bad_server_name_complains(self): + # by default, it will just set the version to 2.3 + server = TSC.Server("fake-url") + + def test_init_server_model_valid_server_name_works(self): + # by default, it will just set the version to 2.3 + server = TSC.Server("http://fake-url") + + def test_init_server_model_valid_https_server_name_works(self): + # by default, it will just set the version to 2.3 + server = TSC.Server("https://fake-url") + + def test_init_server_model_bad_server_name_not_version_check(self): + # by default, it will just set the version to 2.3 + server = TSC.Server("fake-url", use_server_version=False) + + def test_init_server_model_bad_server_name_do_version_check(self): + with self.assertRaises(MissingSchema): + server = TSC.Server("fake-url", use_server_version=True) + + def test_init_server_model_bad_server_name_not_version_check_random_options(self): + # by default, it will just set the version to 2.3 + server = TSC.Server("fake-url", use_server_version=False, http_options={"foo": 1}) + + def test_init_server_model_bad_server_name_not_version_check_real_options(self): + # by default, it will attempt to contact the server to check it's version + server = TSC.Server("fake-url", use_server_version=False, http_options={"verify": False}) + + def test_http_options_skip_ssl_works(self): + http_options = {"verify": False} + server = TSC.Server("http://fake-url") + server.add_http_options(http_options) + + # ValueError: dictionary update sequence element #0 has length 1; 2 is required + def test_http_options_multiple_options_fails(self): + http_options_1 = {"verify": False} + http_options_2 = {"birdname": "Parrot"} + server = TSC.Server("http://fake-url") + with self.assertRaises(ValueError): + server.add_http_options([http_options_1, http_options_2]) + + # TypeError: cannot convert dictionary update sequence element #0 to a sequence + def test_http_options_not_sequence_fails(self): + server = TSC.Server("http://fake-url") + with self.assertRaises(ValueError): + server.add_http_options({1, 2, 3})
Sign in with http_options set will fail **Describe the bug** Signing in to a server when any http_options have been set in the TSC.Server object will result in an exception: KeyError: 'X-TableauServerClient-Version' **Versions** TSC library versions v0.21 and v0.22 have the issue. Version v0.19.1 works correctly. **To Reproduce** Include any http_options while creating the TSC.Server object: ````py server = TSC.Server('https://10ax.online.tableau.com', use_server_version=True, http_options={"verify": False}) server.auth.sign_in(tableau_auth) ```` Workaround: use TSC v0.19.1 instead. **Results** ````py Traceback (most recent call last): File "/Users/brian/github/server-client-python/pat-login.py", line 17, in <module> server = TSC.Server('https://10ax.online.tableau.com', use_server_version=True, http_options={"verify": False}) File "/Users/brian/github/server-client-python/tableauserverclient/server/server.py", line 101, in __init__ self.add_http_version_header() File "/Users/brian/github/server-client-python/tableauserverclient/server/server.py", line 112, in add_http_version_header if not self._http_options[client_version_header]: KeyError: 'X-TableauServerClient-Version' ````
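For illustration, a minimal standalone sketch of the failure mode and the usual defensive fix. `client_version_header` comes from `server.py` above; the helper functions and the "0.23" version string are hypothetical, not the library's API:

```python
# Hypothetical helpers reproducing the bug: indexing user-supplied
# http_options raises KeyError when the version header is absent.
client_version_header = "X-TableauServerClient-Version"

def add_version_header_buggy(http_options: dict, version: str) -> None:
    # Fails for e.g. {"verify": False}, which lacks the header key.
    if not http_options[client_version_header]:
        http_options[client_version_header] = version

def add_version_header_fixed(http_options: dict, version: str) -> None:
    # dict.get returns None for a missing key instead of raising.
    if not http_options.get(client_version_header):
        http_options[client_version_header] = version

opts = {"verify": False}
add_version_header_fixed(opts, "0.23")   # header added, no error
try:
    add_version_header_buggy({"verify": False}, "0.23")
except KeyError as exc:
    print("reproduced:", exc)  # reproduced: 'X-TableauServerClient-Version'
```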
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/http/test_http_requests.py::ServerTests::test_http_options_not_sequence_fails", "test/http/test_http_requests.py::ServerTests::test_init_server_model_bad_server_name_not_version_check_random_options", "test/http/test_http_requests.py::ServerTests::test_init_server_model_bad_server_name_not_version_check_real_options" ]
[ "test/http/test_http_requests.py::ServerTests::test_http_options_multiple_options_fails", "test/http/test_http_requests.py::ServerTests::test_http_options_skip_ssl_works", "test/http/test_http_requests.py::ServerTests::test_init_server_model_bad_server_name_complains", "test/http/test_http_requests.py::ServerTests::test_init_server_model_bad_server_name_do_version_check", "test/http/test_http_requests.py::ServerTests::test_init_server_model_bad_server_name_not_version_check", "test/http/test_http_requests.py::ServerTests::test_init_server_model_empty_throws", "test/http/test_http_requests.py::ServerTests::test_init_server_model_valid_https_server_name_works", "test/http/test_http_requests.py::ServerTests::test_init_server_model_valid_server_name_works" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2022-09-22T06:57:51Z"
mit
tableau__server-client-python-1345
diff --git a/.gitignore b/.gitignore index e9bd2b4..92778cd 100644 --- a/.gitignore +++ b/.gitignore @@ -155,3 +155,4 @@ $RECYCLE.BIN/ docs/_site/ docs/.jekyll-metadata docs/Gemfile.lock +samples/credentials diff --git a/contributing.md b/contributing.md index 41c339c..6404611 100644 --- a/contributing.md +++ b/contributing.md @@ -10,8 +10,7 @@ Contribution can include, but are not limited to, any of the following: * Fix an Issue/Bug * Add/Fix documentation -Contributions must follow the guidelines outlined on the [Tableau Organization](http://tableau.github.io/) page, though filing an issue or requesting -a feature do not require the CLA. +Contributions must follow the guidelines outlined on the [Tableau Organization](http://tableau.github.io/) page, though filing an issue or requesting a feature do not require the CLA. ## Issues and Feature Requests diff --git a/samples/getting_started/3_hello_universe.py b/samples/getting_started/3_hello_universe.py index 3ed39fd..21de978 100644 --- a/samples/getting_started/3_hello_universe.py +++ b/samples/getting_started/3_hello_universe.py @@ -62,11 +62,6 @@ def main(): print("{} jobs".format(pagination.total_available)) print(jobs[0]) - metrics, pagination = server.metrics.get() - if metrics: - print("{} metrics".format(pagination.total_available)) - print(metrics[0]) - schedules, pagination = server.schedules.get() if schedules: print("{} schedules".format(pagination.total_available)) @@ -82,7 +77,7 @@ def main(): print("{} webhooks".format(pagination.total_available)) print(webhooks[0]) - users, pagination = server.metrics.get() + users, pagination = server.users.get() if users: print("{} users".format(pagination.total_available)) print(users[0]) @@ -92,5 +87,6 @@ def main(): print("{} groups".format(pagination.total_available)) print(groups[0]) - if __name__ == "__main__": - main() + +if __name__ == "__main__": + main() diff --git a/tableauserverclient/models/interval_item.py b/tableauserverclient/models/interval_item.py index f2f1596..537e6c1 100644 --- a/tableauserverclient/models/interval_item.py +++ b/tableauserverclient/models/interval_item.py @@ -69,7 +69,7 @@ class HourlyInterval(object): @interval.setter def interval(self, intervals): - VALID_INTERVALS = {0.25, 0.5, 1, 2, 4, 6, 8, 12} + VALID_INTERVALS = {0.25, 0.5, 1, 2, 4, 6, 8, 12, 24} for interval in intervals: # if an hourly interval is a string, then it is a weekDay interval if isinstance(interval, str) and not interval.isnumeric() and not hasattr(IntervalItem.Day, interval): diff --git a/tableauserverclient/server/endpoint/endpoint.py b/tableauserverclient/server/endpoint/endpoint.py index 77a7712..2b7f570 100644 --- a/tableauserverclient/server/endpoint/endpoint.py +++ b/tableauserverclient/server/endpoint/endpoint.py @@ -1,5 +1,3 @@ -from threading import Thread -from time import sleep from tableauserverclient import datetime_helpers as datetime from packaging.version import Version @@ -76,55 +74,20 @@ class Endpoint(object): return parameters def _blocking_request(self, method, url, parameters={}) -> Optional[Union["Response", Exception]]: - self.async_response = None response = None logger.debug("[{}] Begin blocking request to {}".format(datetime.timestamp(), url)) try: response = method(url, **parameters) - self.async_response = response logger.debug("[{}] Call finished".format(datetime.timestamp())) except Exception as e: logger.debug("Error making request to server: {}".format(e)) - self.async_response = e - finally: - if response and not self.async_response: - 
logger.debug("Request response not saved") - return None - logger.debug("[{}] Request complete".format(datetime.timestamp())) - return self.async_response + raise e + return response def send_request_while_show_progress_threaded( self, method, url, parameters={}, request_timeout=None ) -> Optional[Union["Response", Exception]]: - try: - request_thread = Thread(target=self._blocking_request, args=(method, url, parameters)) - request_thread.start() - except Exception as e: - logger.debug("Error starting server request on separate thread: {}".format(e)) - return None - seconds = 0.05 - minutes = 0 - last_log_minute = 0 - sleep(seconds) - if self.async_response is not None: - # a quick return for any immediate responses - return self.async_response - timed_out: bool = request_timeout is not None and seconds > request_timeout - while (self.async_response is None) and not timed_out: - sleep(DELAY_SLEEP_SECONDS) - seconds = seconds + DELAY_SLEEP_SECONDS - minutes = int(seconds / 60) - last_log_minute = self.log_wait_time(minutes, last_log_minute, url) - return self.async_response - - def log_wait_time(self, minutes, last_log_minute, url) -> int: - logger.debug("{} Waiting....".format(datetime.timestamp())) - if minutes > last_log_minute: # detailed log message ~every minute - logger.info("[{}] Waiting ({} minutes so far) for request to {}".format(datetime.timestamp(), minutes, url)) - last_log_minute = minutes - else: - logger.debug("[{}] Waiting for request to {}".format(datetime.timestamp(), url)) - return last_log_minute + return self._blocking_request(method, url, parameters) def _make_request( self,
tableau/server-client-python
00c767786f84509bb9aaa6bc2d35b669dd6a2c4b
diff --git a/test/test_endpoint.py b/test/test_endpoint.py index 3d2d1c9..8635af9 100644 --- a/test/test_endpoint.py +++ b/test/test_endpoint.py @@ -1,4 +1,6 @@ from pathlib import Path +import pytest +import requests import unittest import tableauserverclient as TSC @@ -35,11 +37,12 @@ class TestEndpoint(unittest.TestCase): ) self.assertIsNotNone(response) - def test_blocking_request_returns(self) -> None: - url = "http://test/" - endpoint = TSC.server.Endpoint(self.server) - response = endpoint._blocking_request(endpoint.parent_srv.session.get, url=url) - self.assertIsNotNone(response) + def test_blocking_request_raises_request_error(self) -> None: + with pytest.raises(requests.exceptions.ConnectionError): + url = "http://test/" + endpoint = TSC.server.Endpoint(self.server) + response = endpoint._blocking_request(endpoint.parent_srv.session.get, url=url) + self.assertIsNotNone(response) def test_get_request_stream(self) -> None: url = "http://test/"
Unauthorized Access error when signing out (either explicitly or at the end of a `with` block) Starting with v0.29 I notice many of my test scripts failing with an exception. It seems like a general problem rather than specific to one endpoint. Here's an example: ```py import tableauserverclient as TSC tableau_auth = TSC.PersonalAccessTokenAuth( "xxxxx", "xxxxxxxxxx", "", ) server = TSC.Server("https://devplat.tableautest.com", use_server_version=True) with server.auth.sign_in(tableau_auth): all_wb, pagination_item = server.workbooks.get() print("\nThere are {} workbooks: ".format(pagination_item.total_available)) for wb in all_wb: print(wb.id, wb.name, wb.tags) ``` The script succeeds (printing workbooks), but ends with an exception like this: ``` Traceback (most recent call last): File "/Users/bcantoni/github/server-client-python/getdatasources.py", line 14, in <module> with server.auth.sign_in(tableau_auth): File "/Users/bcantoni/github/server-client-python/tableauserverclient/server/endpoint/auth_endpoint.py", line 27, in __exit__ self._callback() File "/Users/bcantoni/github/server-client-python/tableauserverclient/server/endpoint/endpoint.py", line 291, in wrapper return func(self, *args, **kwargs) File "/Users/bcantoni/github/server-client-python/tableauserverclient/server/endpoint/auth_endpoint.py", line 85, in sign_out self.post_request(url, "") File "/Users/bcantoni/github/server-client-python/tableauserverclient/server/endpoint/endpoint.py", line 248, in post_request return self._make_request( File "/Users/bcantoni/github/server-client-python/tableauserverclient/server/endpoint/endpoint.py", line 165, in _make_request self._check_status(server_response, url) File "/Users/bcantoni/github/server-client-python/tableauserverclient/server/endpoint/endpoint.py", line 186, in _check_status raise NotSignedInError(server_response.content, url) tableauserverclient.server.endpoint.exceptions.NotSignedInError: (b'<?xml version=\'1.0\' encoding=\'UTF-8\'?><tsResponse xmlns="http://tableau.com/api" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://tableau.com/api https://help.tableau.com/samples/en-us/rest_api/ts-api_3_22.xsd"><error code="401002"><summary>Unauthorized Access</summary><detail>Invalid authentication credentials were provided.</detail></error></tsResponse>', 'https://10ax.online.tableau.com/api/3.22/auth/signout') ``` If I switch from using the `with` statement to the older style, it works fine: ```py import tableauserverclient as TSC tableau_auth = TSC.PersonalAccessTokenAuth( "xxxx", "xxxxxxx", "", ) server = TSC.Server("https://devplat.tableautest.com", use_server_version=True) server.auth.sign_in(tableau_auth) all_wb, pagination_item = server.workbooks.get() print("\nThere are {} workbooks: ".format(pagination_item.total_available)) for wb in all_wb: print(wb.id, wb.name, wb.tags) ``` Testing notes: - I tested with same results on both Tableau Cloud and Server. - This seems new to 0.29; switching back to 0.28 solves the issue. - Git bisect points to https://github.com/tableau/server-client-python/pull/1300 as the point this was introduced - In server/endpoint/endpoint.py, changing `seconds = 0.05` back to `seconds = 0` seems to fix it (but I'll admit I don't totally follow the changes in that PR)
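The patch above drops the thread-and-poll machinery in favour of a direct blocking call that re-raises transport errors. A minimal sketch of that shape, illustrative rather than the library's exact code (the unresolvable `http://test/` host mirrors the updated test):

```python
import requests

def blocking_request(method, url, parameters=None):
    # Call the HTTP method directly and let transport errors propagate,
    # instead of stashing the result on the instance from a worker thread.
    parameters = parameters or {}
    return method(url, **parameters)

try:
    blocking_request(requests.get, "http://test/")  # unresolvable host
except requests.exceptions.ConnectionError as exc:
    print("request failed:", type(exc).__name__)
```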
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_endpoint.py::TestEndpoint::test_blocking_request_raises_request_error" ]
[ "test/test_endpoint.py::TestEndpoint::test_binary_log_truncated", "test/test_endpoint.py::TestEndpoint::test_fallback_request_logic", "test/test_endpoint.py::TestEndpoint::test_get_request_stream", "test/test_endpoint.py::TestEndpoint::test_set_user_agent_from_options", "test/test_endpoint.py::TestEndpoint::test_set_user_agent_from_options_headers", "test/test_endpoint.py::TestEndpoint::test_set_user_agent_when_blank", "test/test_endpoint.py::TestEndpoint::test_user_friendly_request_returns" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-01-19T09:14:30Z"
mit
tableau__server-client-python-274
diff --git a/samples/download_view_image.py b/samples/download_view_image.py index 2da2320..b95a862 100644 --- a/samples/download_view_image.py +++ b/samples/download_view_image.py @@ -43,7 +43,7 @@ def main(): tableau_auth = TSC.TableauAuth(args.username, password, site_id=site_id) server = TSC.Server(args.server) # The new endpoint was introduced in Version 2.5 - server.version = 2.5 + server.version = "2.5" with server.auth.sign_in(tableau_auth): # Step 2: Query for the view that we want an image of diff --git a/tableauserverclient/server/endpoint/endpoint.py b/tableauserverclient/server/endpoint/endpoint.py index deaa94a..e78b2e0 100644 --- a/tableauserverclient/server/endpoint/endpoint.py +++ b/tableauserverclient/server/endpoint/endpoint.py @@ -27,6 +27,17 @@ class Endpoint(object): return headers + @staticmethod + def _safe_to_log(server_response): + '''Checks if the server_response content is not xml (eg binary image or zip) + and and replaces it with a constant + ''' + ALLOWED_CONTENT_TYPES = ('application/xml',) + if server_response.headers.get('Content-Type', None) not in ALLOWED_CONTENT_TYPES: + return '[Truncated File Contents]' + else: + return server_response.content + def _make_request(self, method, url, content=None, request_object=None, auth_token=None, content_type=None, parameters=None): if request_object is not None: @@ -50,7 +61,7 @@ class Endpoint(object): return server_response def _check_status(self, server_response): - logger.debug(server_response.content) + logger.debug(self._safe_to_log(server_response)) if server_response.status_code not in Success_codes: raise ServerResponseError.from_response(server_response.content, self.parent_srv.namespace)
tableau/server-client-python
86e463810be80c2b562845f7c14b775d604f2a86
diff --git a/test/test_regression_tests.py b/test/test_regression_tests.py index 95bdcea..8958c3c 100644 --- a/test/test_regression_tests.py +++ b/test/test_regression_tests.py @@ -1,8 +1,23 @@ import unittest import tableauserverclient.server.request_factory as factory +from tableauserverclient.server.endpoint import Endpoint class BugFix257(unittest.TestCase): def test_empty_request_works(self): result = factory.EmptyRequest().empty_req() self.assertEqual(b'<tsRequest />', result) + + +class BugFix273(unittest.TestCase): + def test_binary_log_truncated(self): + + class FakeResponse(object): + + headers = {'Content-Type': 'application/octet-stream'} + content = b'\x1337' * 1000 + status_code = 200 + + server_response = FakeResponse() + + self.assertEqual(Endpoint._safe_to_log(server_response), '[Truncated File Contents]')
This log line is overly chatty https://github.com/tableau/server-client-python/blob/608aa7694d0560ea3c8c37b10127b11207e56e8d/tableauserverclient/server/endpoint/endpoint.py#L53 When using server-client-python to download workbooks or data sources with log_level=Debug, this log line floods the logs: it emits the hexadecimal representation of the entire file being downloaded, which is unhelpful and balloons the log size. Can we remove this line, or only log the response contents when the endpoint is not being used to download a file?
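A standalone sketch mirroring the `_safe_to_log` guard introduced in the patch above: the raw body is only returned for logging when the Content-Type is XML, and binary downloads are replaced by a placeholder. The `FakeResponse` stand-in is borrowed from the test patch:

```python
ALLOWED_CONTENT_TYPES = ("application/xml",)

def safe_to_log(response):
    # Replace non-XML (e.g. binary image or zip) bodies with a constant
    # so debug logging never dumps a whole downloaded file.
    if response.headers.get("Content-Type") not in ALLOWED_CONTENT_TYPES:
        return "[Truncated File Contents]"
    return response.content

class FakeResponse:
    headers = {"Content-Type": "application/octet-stream"}
    content = b"\x13\x37" * 1000  # stands in for a downloaded workbook

print(safe_to_log(FakeResponse()))  # [Truncated File Contents]
```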
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_regression_tests.py::BugFix273::test_binary_log_truncated" ]
[ "test/test_regression_tests.py::BugFix257::test_empty_request_works" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2018-03-09T02:04:54Z"
mit
tacitvenom__genomics_algo-37
diff --git a/genomics_algo/miscellaneous_algorithms/misc_algos.py b/genomics_algo/miscellaneous_algorithms/misc_algos.py index 0b937b1..9703fcc 100644 --- a/genomics_algo/miscellaneous_algorithms/misc_algos.py +++ b/genomics_algo/miscellaneous_algorithms/misc_algos.py @@ -1,8 +1,13 @@ +from itertools import product import numpy as np -from typing import List, Tuple +from typing import List, Set, Tuple -from genomics_algo.utilities.misc_utilities import get_frequency_map +from genomics_algo.utilities.misc_utilities import ( + get_frequency_map, + validate_bases_in_genome, +) +from genomics_algo.utilities.string_cmp import find_hamming_distance def find_most_freq_k_substring( @@ -56,3 +61,57 @@ def find_minimum_gc_skew_location(genome: str) -> int: gc_skew[index + 1] += genome[index] == "G" gc_skew[index + 1] -= genome[index] == "C" return np.where(gc_skew == gc_skew.min())[0] - 1 + + +def find_frequent_kmers_with_mismatches(genome: str, k: int, d: int) -> Set[str]: + """Determine most frequent k-mers with at most `d` mismatches. + A most frequent k-mer with up to `d` mismatches in `genome` is simply a string pattern maximising + the total number of occurrences of said pattern in `genome` with at most `d` mismatches. + Note that the pattern does not need to actually appear as a substring of `genome`. + >>> find_frequent_kmers_with_mismatches('ACGTTGCATGTCGCATGATGCATGAGAGCT', 4, 1)-{'ATGC', 'GATG', 'ATGT'} + set() + + Parameters + ---------- + genome : str + String representation of genome. + k: int + Length of kmers to find. + d: int + Number of allowed mismatches in kmers. + + Returns + ------- + Set[str] + Set of most frequent kmers with up to d mismatches + """ + + n = len(genome) + chars = {"A", "C", "G", "T"} + # input validation: + validate_bases_in_genome(genome) + if n < k or k < d or d < 0: + raise ValueError( + f"The input values for genome, k and d don't make sense. It must hold: len(genome)>=k, k>=d, d>=0. Received: len(genome)={n}, k={k}, d={d}." + ) + if k > 12 or d > 3: + raise Warning( + f"The large input values k={k} and/or d={d} might cause long run times." + ) + + frequency_map = {} + # FIXME here ALL possible patterns of length k are created -> should be optimised + possible_patterns = ["".join(p) for p in product(chars, repeat=k)] + for i in range(n - k + 1): + pattern = genome[i : i + k] + for kmer in possible_patterns: + if find_hamming_distance(pattern, kmer) <= d: + if kmer in frequency_map.keys(): + frequency_map[kmer] += 1 + else: + frequency_map[kmer] = 1 + + most_frequent = max(frequency_map.values()) + return { + kmer for kmer, frequency in frequency_map.items() if frequency == most_frequent + } diff --git a/genomics_algo/utilities/misc_utilities.py b/genomics_algo/utilities/misc_utilities.py index 4fa1603..c737773 100644 --- a/genomics_algo/utilities/misc_utilities.py +++ b/genomics_algo/utilities/misc_utilities.py @@ -70,3 +70,11 @@ def get_frequency_map(text: str, substring_length: int) -> Dict[str, int]: else: freq_map[substr] = 1 return freq_map + + +def validate_bases_in_genome(genome: str) -> bool: + """Validates a genome string for existing bases. + Raises ``ValueError`` if ``genome`` contains bases other than defined in ``Bases`` class.""" + set_diff = set(genome).difference({Bases.A, Bases.C, Bases.G, Bases.T}) + if not set_diff == set(): + raise ValueError(f"Genome contains invalid bases: {set_diff}")
tacitvenom/genomics_algo
3174c1e9e685db12c5849ce5c7e3411f1922a4be
diff --git a/genomics_algo/tests/miscellaneous_algorithms/test_misc_algos.py b/genomics_algo/tests/miscellaneous_algorithms/test_misc_algos.py index 85c96b7..f1f14a8 100644 --- a/genomics_algo/tests/miscellaneous_algorithms/test_misc_algos.py +++ b/genomics_algo/tests/miscellaneous_algorithms/test_misc_algos.py @@ -5,6 +5,7 @@ from genomics_algo.utilities.read_files import read_genome from genomics_algo.miscellaneous_algorithms.misc_algos import ( find_pattern_clumps, find_minimum_gc_skew_location, + find_frequent_kmers_with_mismatches, ) @@ -53,3 +54,100 @@ def test_find_minimum_gc_skew_location_in_genome(): genome = read_genome("genomics_algo/tests/test_data/e_coli.txt") result = find_minimum_gc_skew_location(genome) np.testing.assert_array_equal([3923619, 3923620, 3923621, 3923622], result) + + +def test_find_frequent_kmers_with_mismatches_raises(): + with pytest.raises(Warning): + find_frequent_kmers_with_mismatches("ACGTTGCAACGTTGCA", 13, 3) + with pytest.raises(Warning): + find_frequent_kmers_with_mismatches("ACGTTGCAACGTTGCA", 12, 4) + with pytest.raises(ValueError) as e: + find_frequent_kmers_with_mismatches("ACGT", 6, -1) + assert "Received: len(genome)=4, k=6, d=-1." in str(e.value) + + [email protected](reason="Takes around 2 minutes to execute") +def test_find_frequent_kmers_with_mismatches_benchmark(): + res = find_frequent_kmers_with_mismatches("ACGTTGCAACGTTGCA", 12, 3) + assert len(res) == 32855 + + +def test_find_frequent_kmers_with_mismatches(): + """Some debug datasets taken from: + http://bioinformaticsalgorithms.com/data/debugdatasets/replication/FrequentWordsWithMismatchesProblem.pdf + """ + + """ Dataset 1 + This dataset checks that the implementation includes k-mers that do not actually appear in Text. + Notice here that, although none of the output k-mers except for AA actually appear in Text, they + are all valid because they appear in Text with up to 1 mismatch (i.e. 0 or 1 mismatch). + """ + genome1 = "AAAAAAAAAA" + k1 = 2 + d1 = 1 + expected1 = {"AA", "AC", "AG", "CA", "AT", "GA", "TA"} + result1 = find_frequent_kmers_with_mismatches(genome1, k1, d1) + assert result1 == expected1 + + """ Dataset 2 + This dataset makes sure that the implementation is not accidentally swapping k and d. + """ + genome2 = "AGTCAGTC" + k2 = 4 + d2 = 2 + expected2 = { + "TCTC", + "CGGC", + "AAGC", + "TGTG", + "GGCC", + "AGGT", + "ATCC", + "ACTG", + "ACAC", + "AGAG", + "ATTA", + "TGAC", + "AATT", + "CGTT", + "GTTC", + "GGTA", + "AGCA", + "CATC", + } + result2 = find_frequent_kmers_with_mismatches(genome2, k2, d2) + assert result2 == expected2 + + """ Dataset 3 + This dataset makes sure you are not finding patterns in the Reverse Complement of genome + """ + genome3 = "AATTAATTGGTAGGTAGGTA" + k3 = 4 + d3 = 0 + expected3 = {"GGTA"} + result3 = find_frequent_kmers_with_mismatches(genome3, k3, d3) + assert result3 == expected3 + + """ Dataset 4 + This dataset first checks that k-mers with exactly d mismatches are being found. Then, it + checks that k-mers with less than d mismatches are being allowed (i.e. you are not only allowing + k-mers with exactly d mismatches). Next, it checks that you are not returning too few k-mers. + Last, it checks that you are not returning too many k-mers. 
+ """ + genome4 = "ATA" + k4 = 3 + d4 = 1 + expected4 = {"GTA", "ACA", "AAA", "ATC", "ATA", "AGA", "ATT", "CTA", "TTA", "ATG"} + result4 = find_frequent_kmers_with_mismatches(genome4, k4, d4) + assert result4 == expected4 + + """ Dataset 5 + This dataset checks that your code is not looking for k-mers in the Reverse Complement + of genome. + """ + genome5 = "AAT" + k5 = 3 + d5 = 0 + expected5 = {"AAT"} + result5 = find_frequent_kmers_with_mismatches(genome5, k5, d5) + assert result5 == expected5
Frequent Words with Mismatches Find the most frequent k-mers with mismatches in a string. Input: A string Text as well as integers k and d. (You may assume k ≤ 12 and d ≤ 3.) Output: All most frequent k-mers with up to d mismatches in Text.
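A compact brute-force sketch of this problem, assuming the stated bounds (k ≤ 12, d ≤ 3) keep the 4^k candidate space tractable; the helper names are illustrative:

```python
from itertools import product

def hamming(p: str, q: str) -> int:
    # Number of positions at which two equal-length strings differ.
    return sum(a != b for a, b in zip(p, q))

def frequent_kmers_with_mismatches(text: str, k: int, d: int) -> set:
    counts = {}
    # Enumerate every candidate pattern of length k, even ones that never
    # appear verbatim in text, and count approximate occurrences.
    for kmer in ("".join(p) for p in product("ACGT", repeat=k)):
        counts[kmer] = sum(
            hamming(text[i:i + k], kmer) <= d
            for i in range(len(text) - k + 1)
        )
    best = max(counts.values())
    return {kmer for kmer, c in counts.items() if c == best}

print(frequent_kmers_with_mismatches("ACGTTGCATGTCGCATGATGCATGAGAGCT", 4, 1))
# {'GATG', 'ATGC', 'ATGT'} (set order may vary)
```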
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "genomics_algo/tests/miscellaneous_algorithms/test_misc_algos.py::test_find_pattern_clumps_short", "genomics_algo/tests/miscellaneous_algorithms/test_misc_algos.py::test_find_minimum_gc_skew_location", "genomics_algo/tests/miscellaneous_algorithms/test_misc_algos.py::test_find_frequent_kmers_with_mismatches_raises", "genomics_algo/tests/miscellaneous_algorithms/test_misc_algos.py::test_find_frequent_kmers_with_mismatches" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2021-02-02T23:48:56Z"
mit
takahi-i__hideout-36
diff --git a/hideout/file.py b/hideout/file.py index 395e5d0..f204a37 100644 --- a/hideout/file.py +++ b/hideout/file.py @@ -66,11 +66,21 @@ def _generate_file_path_from_label(label): def _generate_file_path_from_func(func, func_args={}): - label = func.__name__ + class_name = _get_class_that_defined_method(func) + label = "{}".format(class_name) for arg_name in func_args: arg_value = str(func_args[arg_name]) if len(arg_value) > 10: arg_value = hashlib.md5(arg_value.encode("utf-8")).hexdigest()[0:10] - print("hashed_value: " + arg_value) label += "-{}-{}".format(arg_name, arg_value) return _generate_file_path_from_label(label) + + +def _get_class_that_defined_method(method): + class_name = "" + names = method.__qualname__.split('.') + for i, attr in enumerate(names): + class_name += "{}".format(attr) + if i != len(names) - 1: + class_name += "-" + return class_name
takahi-i/hideout
e024334f3f124b8df0a9617c3bc8fe8ab7c909aa
diff --git a/tests/test_file.py b/tests/test_file.py index 9e1d842..31ddb08 100644 --- a/tests/test_file.py +++ b/tests/test_file.py @@ -13,24 +13,40 @@ class Generator2: return {"foobar": baz} +class Generator3: + class InnerGenerator: + def generate(self, baz): + return {"foobar": baz} + + def __init__(self) -> None: + self.inner = Generator3.InnerGenerator() + + class TestFile(unittest.TestCase): def test_generate_file_name_with_label(self): self.assertEquals("large_object.pickle", os.path.basename(generate_path( - func=generate, - func_args={"baz": [0, 1, 2, 3, 4, 5, 7, 6, 8, 9, 10]}, - label="large_object"))) + func=generate, + func_args={"baz": [0, 1, 2, 3, 4, 5, 7, 6, 8, 9, 10]}, + label="large_object"))) def test_generate_file_name_from_hash(self): self.assertEquals("generate-baz-6979983cbc.pickle", os.path.basename(generate_path( - func=generate, - func_args={"baz": [0, 1, 2, 3, 4, 5, 7, 6, 8, 9, 10]}))) + func=generate, + func_args={"baz": [0, 1, 2, 3, 4, 5, 7, 6, 8, 9, 10]}))) def test_generate_file_name_from_hash_with_instance(self): generator = Generator2() - self.assertEquals("generate-baz-6979983cbc.pickle", + self.assertEquals("Generator2-generate-baz-6979983cbc.pickle", os.path.basename(generate_path( func=generator.generate, func_args={"baz": [0, 1, 2, 3, 4, 5, 7, 6, 8, 9, 10]}))) + + def test_generate_file_name_from_hash_with_instance_of_inner_class(self): + generator = Generator3() + self.assertEquals("Generator3-InnerGenerator-generate-baz-6979983cbc.pickle", + os.path.basename(generate_path( + func=generator.inner.generate, + func_args={"baz": [0, 1, 2, 3, 4, 5, 7, 6, 8, 9, 10]})))
Get class name for bound method for cache file
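A minimal sketch of the `__qualname__`-based approach the patch takes: the qualified name of a bound method already encodes the defining class and any enclosing classes, so it can be split into a stable cache-file label:

```python
def label_for(method) -> str:
    # "Outer.Inner.generate" -> "Outer-Inner-generate"
    return "-".join(method.__qualname__.split("."))

class Outer:
    class Inner:
        def generate(self, baz):
            return {"foobar": baz}

inner = Outer.Inner()
print(label_for(inner.generate))  # Outer-Inner-generate
```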
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_file.py::TestFile::test_generate_file_name_from_hash_with_instance", "tests/test_file.py::TestFile::test_generate_file_name_from_hash_with_instance_of_inner_class" ]
[ "tests/test_file.py::TestFile::test_generate_file_name_from_hash", "tests/test_file.py::TestFile::test_generate_file_name_with_label" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2019-08-12T05:22:56Z"
mit
takahi-i__pfm-27
diff --git a/pf_manager/pf_command/add.py b/pf_manager/pf_command/add.py index 31abddf..978a2d3 100644 --- a/pf_manager/pf_command/add.py +++ b/pf_manager/pf_command/add.py @@ -20,11 +20,13 @@ def check_fields(target): def check_local_port_is_used(local_port, targets): + print("local_port: " + local_port); + + for target_name in targets: target = targets[target_name] if local_port == target["local_port"]: - raise RuntimeError("local port " + str(local_port) + " is already used in " + target_name) - + logger.warn("local port {} is already used in {}".format(str(local_port), target_name)) def check_remote_port_is_used(new_target, targets): remote_port = new_target["remote_port"] @@ -35,9 +37,7 @@ def check_remote_port_is_used(new_target, targets): target_remote_host = get_remote_host(target) if target_remote_host == remote_host and target["remote_port"] == remote_port: - raise RuntimeError( - "remote port " + str(remote_port) + " in host " + remote_host + "is already used in " + target_name) - + logger.warn("remote port {} in host {} is already used in {} ".format(str(remote_port), remote_host, target_name)) def get_remote_host(target): target_remote_host = target["remote_host"]
takahi-i/pfm
28ad611a179c3e0c463e197ce23e81d1d8968eb1
diff --git a/tests/pf_command/test_add.py b/tests/pf_command/test_add.py index 6f1b0d4..18b86b1 100644 --- a/tests/pf_command/test_add.py +++ b/tests/pf_command/test_add.py @@ -27,7 +27,7 @@ class TestPfm(unittest.TestCase): None) self.assertRaises(RuntimeError, lambda: add_command.generate_consistent_target({})) - def test_fail_to_add_same_local_port(self): + def test_add_same_local_port(self): targets = {'food-nonfood': { 'name': 'text-classification', @@ -37,7 +37,7 @@ class TestPfm(unittest.TestCase): } add_command = AddCommand("image-processing", None, "L", "localhost", "8888", "8888", "my.aws.com", None, None, None) - self.assertRaises(RuntimeError, lambda: add_command.generate_consistent_target(targets)) + self.assertEqual("8888", add_command.generate_consistent_target(targets)["local_port"]) def test_add_target_without_local_port(self): targets = {'food-nonfood': @@ -49,6 +49,7 @@ class TestPfm(unittest.TestCase): } add_command = AddCommand("image-processing", None, "L", "localhost", "8888", None, "my.aws.com", None, None, None) + self.assertEqual("49152", add_command.generate_consistent_target(targets)["local_port"]) def test_add_target_without_remote_port(self): targets = {'food-nonfood': @@ -85,7 +86,7 @@ class TestPfm(unittest.TestCase): add_command = AddCommand("image-processing", None, "L", "my-ml-instance.ml.aws.com", "9999", "7777", "ssh-server-instance.ml.aws.com", None, None, None) - self.assertRaises(RuntimeError, lambda: add_command.generate_consistent_target(targets)) + self.assertEqual("9999", add_command.generate_consistent_target(targets)["remote_port"]) def test_fail_to_add_same_remote_port_in_same_host2(self): targets = {'food-nonfood': @@ -98,4 +99,4 @@ class TestPfm(unittest.TestCase): add_command = AddCommand("image-processing", None, 'L', 'localhost', '9999', '7777', 'my-ml-instance.ml.aws.com', None, None, None) - self.assertRaises(RuntimeError, lambda: add_command.generate_consistent_target(targets)) + self.assertEqual("9999", add_command.generate_consistent_target(targets)["remote_port"])
Cannot add new stuff with the same local port * pfm version: 0.3.0 * Python version: python 3.5.2 * Operating System: macOS 10.13 ### Description I cannot add a new element using a local port that was used before. ### What I Did ``` $ pfm list +-------+------+------------+-------------+-------------+------------+-------------+-------------+ | name  | type | local_port | remote_host | remote_port | login_user | ssh_server  | server_port | +=======+======+============+=============+=============+============+=============+=============+ | test1 | L    | 8888       | localhost   | 10003       | None       | test-server | None        | +-------+------+------------+-------------+-------------+------------+-------------+-------------+ ``` Then ``` $ pfm add --name test2 --local-port 8888 --ssh-server test-server local_port is not specified allocating remote_port for test2... remote_port of test2 is set to 49152 Failed to register... local port 8888 is already used in test1 ```
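A sketch of the behaviour change the patch adopts: a duplicate local port now logs a warning and lets registration proceed, instead of raising `RuntimeError`. The `targets` layout below loosely mirrors the test fixtures:

```python
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("pfm")

def check_local_port(local_port: str, targets: dict) -> None:
    for name, target in targets.items():
        if target["local_port"] == local_port:
            # Warn and continue rather than raising RuntimeError, so the
            # new forwarding target can still be registered.
            logger.warning("local port %s is already used in %s", local_port, name)

targets = {"test1": {"local_port": "8888"}}
check_local_port("8888", targets)  # logs the warning; registration proceeds
```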
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/pf_command/test_add.py::TestPfm::test_add_same_local_port", "tests/pf_command/test_add.py::TestPfm::test_fail_to_add_same_remote_port_in_same_host", "tests/pf_command/test_add.py::TestPfm::test_fail_to_add_same_remote_port_in_same_host2" ]
[ "tests/pf_command/test_add.py::TestPfm::test_add_same_remote_port_in_different_host", "tests/pf_command/test_add.py::TestPfm::test_add_target_without_local_port", "tests/pf_command/test_add.py::TestPfm::test_add_target_without_remote_port", "tests/pf_command/test_add.py::TestPfm::test_generate_target_with_argument", "tests/pf_command/test_add.py::TestPfm::test_generate_target_with_options", "tests/pf_command/test_add.py::TestPfm::test_raise_exception_with_inadiquate_parameters" ]
{ "failed_lite_validators": [ "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2018-02-03T15:31:44Z"
mit
tantale__deprecated-69
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index ec2f685..1e43f51 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -28,6 +28,8 @@ Fix - Resolve Python 2.7 support issue introduced in v1.2.14 in ``sphinx.py``. +- Fix #69: Add ``extra_stacklevel`` argument for interoperating with other wrapper functions (refer to #68 for a concrete use case). + Other ----- diff --git a/deprecated/classic.py b/deprecated/classic.py index 6ca3f27..84f683c 100644 --- a/deprecated/classic.py +++ b/deprecated/classic.py @@ -17,7 +17,7 @@ import wrapt try: # If the C extension for wrapt was compiled and wrapt/_wrappers.pyd exists, then the # stack level that should be passed to warnings.warn should be 2. However, if using - # a pure python wrapt, a extra stacklevel is required. + # a pure python wrapt, an extra stacklevel is required. import wrapt._wrappers _routine_stacklevel = 2 @@ -83,7 +83,7 @@ class ClassicAdapter(wrapt.AdapterFactory): return x + y """ - def __init__(self, reason="", version="", action=None, category=DeprecationWarning): + def __init__(self, reason="", version="", action=None, category=DeprecationWarning, extra_stacklevel=0): """ Construct a wrapper adapter. @@ -97,23 +97,33 @@ class ClassicAdapter(wrapt.AdapterFactory): If you follow the `Semantic Versioning <https://semver.org/>`_, the version number has the format "MAJOR.MINOR.PATCH". - :type action: str + :type action: Literal["default", "error", "ignore", "always", "module", "once"] :param action: A warning filter used to activate or not the deprecation warning. Can be one of "error", "ignore", "always", "default", "module", or "once". - If ``None`` or empty, the the global filtering mechanism is used. + If ``None`` or empty, the global filtering mechanism is used. See: `The Warnings Filter`_ in the Python documentation. - :type category: type + :type category: Type[Warning] :param category: The warning category to use for the deprecation warning. By default, the category class is :class:`~DeprecationWarning`, you can inherit this class to define your own deprecation warning category. + + :type extra_stacklevel: int + :param extra_stacklevel: + Number of additional stack levels to consider instrumentation rather than user code. + With the default value of 0, the warning refers to where the class was instantiated + or the function was called. + + .. versionchanged:: 1.2.15 + Add the *extra_stacklevel* parameter. 
""" self.reason = reason or "" self.version = version or "" self.action = action self.category = category + self.extra_stacklevel = extra_stacklevel super(ClassicAdapter, self).__init__() def get_deprecated_msg(self, wrapped, instance): @@ -161,12 +171,13 @@ class ClassicAdapter(wrapt.AdapterFactory): def wrapped_cls(cls, *args, **kwargs): msg = self.get_deprecated_msg(wrapped, None) + stacklevel = _class_stacklevel + self.extra_stacklevel if self.action: with warnings.catch_warnings(): warnings.simplefilter(self.action, self.category) - warnings.warn(msg, category=self.category, stacklevel=_class_stacklevel) + warnings.warn(msg, category=self.category, stacklevel=stacklevel) else: - warnings.warn(msg, category=self.category, stacklevel=_class_stacklevel) + warnings.warn(msg, category=self.category, stacklevel=stacklevel) if old_new1 is object.__new__: return old_new1(cls) # actually, we don't know the real signature of *old_new1* @@ -174,6 +185,24 @@ class ClassicAdapter(wrapt.AdapterFactory): wrapped.__new__ = staticmethod(wrapped_cls) + elif inspect.isroutine(wrapped): + @wrapt.decorator + def wrapper_function(wrapped_, instance_, args_, kwargs_): + msg = self.get_deprecated_msg(wrapped_, instance_) + stacklevel = _routine_stacklevel + self.extra_stacklevel + if self.action: + with warnings.catch_warnings(): + warnings.simplefilter(self.action, self.category) + warnings.warn(msg, category=self.category, stacklevel=stacklevel) + else: + warnings.warn(msg, category=self.category, stacklevel=stacklevel) + return wrapped_(*args_, **kwargs_) + + return wrapper_function(wrapped) + + else: + raise TypeError(repr(type(wrapped))) + return wrapped @@ -226,7 +255,7 @@ def deprecated(*args, **kwargs): return x + y The *category* keyword argument allow you to specify the deprecation warning class of your choice. - By default, :exc:`DeprecationWarning` is used but you can choose :exc:`FutureWarning`, + By default, :exc:`DeprecationWarning` is used, but you can choose :exc:`FutureWarning`, :exc:`PendingDeprecationWarning` or a custom subclass. .. code-block:: python @@ -240,7 +269,7 @@ def deprecated(*args, **kwargs): The *action* keyword argument allow you to locally change the warning filtering. *action* can be one of "error", "ignore", "always", "default", "module", or "once". - If ``None``, empty or missing, the the global filtering mechanism is used. + If ``None``, empty or missing, the global filtering mechanism is used. See: `The Warnings Filter`_ in the Python documentation. .. code-block:: python @@ -252,6 +281,9 @@ def deprecated(*args, **kwargs): def some_old_function(x, y): return x + y + The *extra_stacklevel* keyword argument allows you to specify additional stack levels + to consider instrumentation rather than user code. With the default value of 0, the + warning refers to where the class was instantiated or the function was called. 
""" if args and isinstance(args[0], string_types): kwargs['reason'] = args[0] @@ -261,32 +293,9 @@ def deprecated(*args, **kwargs): raise TypeError(repr(type(args[0]))) if args: - action = kwargs.get('action') - category = kwargs.get('category', DeprecationWarning) adapter_cls = kwargs.pop('adapter_cls', ClassicAdapter) adapter = adapter_cls(**kwargs) - wrapped = args[0] - if inspect.isclass(wrapped): - wrapped = adapter(wrapped) - return wrapped - - elif inspect.isroutine(wrapped): - - @wrapt.decorator(adapter=adapter) - def wrapper_function(wrapped_, instance_, args_, kwargs_): - msg = adapter.get_deprecated_msg(wrapped_, instance_) - if action: - with warnings.catch_warnings(): - warnings.simplefilter(action, category) - warnings.warn(msg, category=category, stacklevel=_routine_stacklevel) - else: - warnings.warn(msg, category=category, stacklevel=_routine_stacklevel) - return wrapped_(*args_, **kwargs_) - - return wrapper_function(wrapped) - - else: - raise TypeError(repr(type(wrapped))) + return adapter(wrapped) return functools.partial(deprecated, **kwargs) diff --git a/deprecated/sphinx.py b/deprecated/sphinx.py index be6dce9..70ef050 100644 --- a/deprecated/sphinx.py +++ b/deprecated/sphinx.py @@ -22,8 +22,6 @@ when the function/method is called or the class is constructed. import re import textwrap -import wrapt - from deprecated.classic import ClassicAdapter from deprecated.classic import deprecated as _classic_deprecated @@ -48,6 +46,7 @@ class SphinxAdapter(ClassicAdapter): version="", action=None, category=DeprecationWarning, + extra_stacklevel=0, line_length=70, ): """ @@ -67,29 +66,40 @@ class SphinxAdapter(ClassicAdapter): If you follow the `Semantic Versioning <https://semver.org/>`_, the version number has the format "MAJOR.MINOR.PATCH". - :type action: str + :type action: Literal["default", "error", "ignore", "always", "module", "once"] :param action: A warning filter used to activate or not the deprecation warning. Can be one of "error", "ignore", "always", "default", "module", or "once". - If ``None`` or empty, the the global filtering mechanism is used. + If ``None`` or empty, the global filtering mechanism is used. See: `The Warnings Filter`_ in the Python documentation. - :type category: type + :type category: Type[Warning] :param category: The warning category to use for the deprecation warning. By default, the category class is :class:`~DeprecationWarning`, you can inherit this class to define your own deprecation warning category. + :type extra_stacklevel: int + :param extra_stacklevel: + Number of additional stack levels to consider instrumentation rather than user code. + With the default value of 0, the warning refers to where the class was instantiated + or the function was called. + :type line_length: int :param line_length: Max line length of the directive text. If non nul, a long text is wrapped in several lines. + + .. versionchanged:: 1.2.15 + Add the *extra_stacklevel* parameter. """ if not version: # https://github.com/tantale/deprecated/issues/40 raise ValueError("'version' argument is required in Sphinx directives") self.directive = directive self.line_length = line_length - super(SphinxAdapter, self).__init__(reason=reason, version=version, action=action, category=category) + super(SphinxAdapter, self).__init__( + reason=reason, version=version, action=action, category=category, extra_stacklevel=extra_stacklevel + ) def __call__(self, wrapped): """ @@ -102,7 +112,7 @@ class SphinxAdapter(ClassicAdapter): # -- build the directive division fmt = ".. 
{directive}:: {version}" if self.version else ".. {directive}::" div_lines = [fmt.format(directive=self.directive, version=self.version)] - width = self.line_length - 3 if self.line_length > 3 else 2 ** 16 + width = self.line_length - 3 if self.line_length > 3 else 2**16 reason = textwrap.dedent(self.reason).strip() for paragraph in reason.splitlines(): if paragraph: @@ -153,7 +163,7 @@ class SphinxAdapter(ClassicAdapter): """ msg = super(SphinxAdapter, self).get_deprecated_msg(wrapped, instance) - # Strip Sphinx cross reference syntax (like ":function:", ":py:func:" and ":py:meth:") + # Strip Sphinx cross-reference syntax (like ":function:", ":py:func:" and ":py:meth:") # Possible values are ":role:`foo`", ":domain:role:`foo`" # where ``role`` and ``domain`` should match "[a-zA-Z]+" msg = re.sub(r"(?: : [a-zA-Z]+ )? : [a-zA-Z]+ : (`[^`]*`)", r"\1", msg, flags=re.X) @@ -163,7 +173,7 @@ class SphinxAdapter(ClassicAdapter): def versionadded(reason="", version="", line_length=70): """ This decorator can be used to insert a "versionadded" directive - in your function/class docstring in order to documents the + in your function/class docstring in order to document the version of the project which adds this new functionality in your library. :param str reason: @@ -193,7 +203,7 @@ def versionadded(reason="", version="", line_length=70): def versionchanged(reason="", version="", line_length=70): """ This decorator can be used to insert a "versionchanged" directive - in your function/class docstring in order to documents the + in your function/class docstring in order to document the version of the project which modifies this functionality in your library. :param str reason: @@ -222,7 +232,7 @@ def versionchanged(reason="", version="", line_length=70): def deprecated(reason="", version="", line_length=70, **kwargs): """ This decorator can be used to insert a "deprecated" directive - in your function/class docstring in order to documents the + in your function/class docstring in order to document the version of the project which deprecates this functionality in your library. :param str reason: @@ -242,17 +252,26 @@ def deprecated(reason="", version="", line_length=70, **kwargs): - "action": A warning filter used to activate or not the deprecation warning. Can be one of "error", "ignore", "always", "default", "module", or "once". - If ``None``, empty or missing, the the global filtering mechanism is used. + If ``None``, empty or missing, the global filtering mechanism is used. - "category": The warning category to use for the deprecation warning. By default, the category class is :class:`~DeprecationWarning`, you can inherit this class to define your own deprecation warning category. + - "extra_stacklevel": + Number of additional stack levels to consider instrumentation rather than user code. + With the default value of 0, the warning refers to where the class was instantiated + or the function was called. + + :return: a decorator used to deprecate a function. .. versionchanged:: 1.2.13 Change the signature of the decorator to reflect the valid use cases. + + .. versionchanged:: 1.2.15 + Add the *extra_stacklevel* parameter. """ directive = kwargs.pop('directive', 'deprecated') adapter_cls = kwargs.pop('adapter_cls', SphinxAdapter) diff --git a/docs/source/tutorial.rst b/docs/source/tutorial.rst index 86cf056..00fb237 100644 --- a/docs/source/tutorial.rst +++ b/docs/source/tutorial.rst @@ -242,3 +242,28 @@ function will raise an exception because the *action* is set to "error". 
File "path/to/deprecated/classic.py", line 274, in wrapper_function warnings.warn(msg, category=category, stacklevel=_stacklevel) DeprecationWarning: Call to deprecated function (or staticmethod) foo. (do not call it) + + +Modifying the deprecated code reference +--------------------------------------- + +By default, when a deprecated function or class is called, the warning message indicates the location of the caller. + +The ``extra_stacklevel`` parameter allows customizing the stack level reference in the deprecation warning message. + +This parameter is particularly useful in scenarios where you have a factory or utility function that creates deprecated +objects or performs deprecated operations. By specifying an ``extra_stacklevel`` value, you can control the stack level +at which the deprecation warning is emitted, making it appear as if the calling function is the deprecated one, +rather than the actual deprecated entity. + +For example, if you have a factory function ``create_object()`` that creates deprecated objects, you can use +the ``extra_stacklevel`` parameter to emit the deprecation warning at the calling location. This provides clearer and +more actionable deprecation messages, allowing developers to identify and update the code that invokes the deprecated +functionality. + +For instance: + +.. literalinclude:: tutorial/warning_ctrl/extra_stacklevel_demo.py + +Please note that the ``extra_stacklevel`` value should be an integer indicating the number of stack levels to skip +when emitting the deprecation warning. diff --git a/docs/source/tutorial/warning_ctrl/extra_stacklevel_demo.py b/docs/source/tutorial/warning_ctrl/extra_stacklevel_demo.py new file mode 100644 index 0000000..3c0516c --- /dev/null +++ b/docs/source/tutorial/warning_ctrl/extra_stacklevel_demo.py @@ -0,0 +1,24 @@ +import warnings + +from deprecated import deprecated + + +@deprecated(version='1.0', extra_stacklevel=1) +class MyObject(object): + def __init__(self, name): + self.name = name + + def __str__(self): + return "object: {name}".format(name=self.name) + + +def create_object(name): + return MyObject(name) + + +if __name__ == '__main__': + warnings.filterwarnings("default", category=DeprecationWarning) + # warn here: + print(create_object("orange")) + # and also here: + print(create_object("banane"))
tantale/deprecated
1ebf9b89aa8a199d2d5b5d6634cd908eb80e1e7f
diff --git a/tests/test_deprecated.py b/tests/test_deprecated.py index e4c00ef..0e467ae 100644 --- a/tests/test_deprecated.py +++ b/tests/test_deprecated.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import inspect import sys import warnings @@ -11,6 +12,10 @@ class MyDeprecationWarning(DeprecationWarning): pass +class WrongStackLevelWarning(DeprecationWarning): + pass + + _PARAMS = [ None, ((), {}), @@ -19,6 +24,7 @@ _PARAMS = [ ((), {'version': '1.2.3'}), ((), {'action': 'once'}), ((), {'category': MyDeprecationWarning}), + ((), {'extra_stacklevel': 1, 'category': WrongStackLevelWarning}), ] @@ -136,7 +142,7 @@ def test_classic_deprecated_function__warns(classic_deprecated_function): warn = warns[0] assert issubclass(warn.category, DeprecationWarning) assert "deprecated function (or staticmethod)" in str(warn.message) - assert warn.filename == __file__, 'Incorrect warning stackLevel' + assert warn.filename == __file__ or warn.category is WrongStackLevelWarning, 'Incorrect warning stackLevel' # noinspection PyShadowingNames @@ -148,7 +154,7 @@ def test_classic_deprecated_class__warns(classic_deprecated_class): warn = warns[0] assert issubclass(warn.category, DeprecationWarning) assert "deprecated class" in str(warn.message) - assert warn.filename == __file__, 'Incorrect warning stackLevel' + assert warn.filename == __file__ or warn.category is WrongStackLevelWarning, 'Incorrect warning stackLevel' # noinspection PyShadowingNames @@ -161,7 +167,7 @@ def test_classic_deprecated_method__warns(classic_deprecated_method): warn = warns[0] assert issubclass(warn.category, DeprecationWarning) assert "deprecated method" in str(warn.message) - assert warn.filename == __file__, 'Incorrect warning stackLevel' + assert warn.filename == __file__ or warn.category is WrongStackLevelWarning, 'Incorrect warning stackLevel' # noinspection PyShadowingNames @@ -173,7 +179,7 @@ def test_classic_deprecated_static_method__warns(classic_deprecated_static_metho warn = warns[0] assert issubclass(warn.category, DeprecationWarning) assert "deprecated function (or staticmethod)" in str(warn.message) - assert warn.filename == __file__, 'Incorrect warning stackLevel' + assert warn.filename == __file__ or warn.category is WrongStackLevelWarning, 'Incorrect warning stackLevel' # noinspection PyShadowingNames @@ -189,7 +195,7 @@ def test_classic_deprecated_class_method__warns(classic_deprecated_class_method) assert "deprecated class method" in str(warn.message) else: assert "deprecated function (or staticmethod)" in str(warn.message) - assert warn.filename == __file__, 'Incorrect warning stackLevel' + assert warn.filename == __file__ or warn.category is WrongStackLevelWarning, 'Incorrect warning stackLevel' def test_should_raise_type_error(): @@ -258,3 +264,61 @@ def test_respect_global_filter(): fun() fun() assert len(warns) == 1 + + +def test_default_stacklevel(): + """ + The objective of this unit test is to ensure that the triggered warning message, + when invoking the 'use_foo' function, correctly indicates the line where the + deprecated 'foo' function is called. 
+ """ + + @deprecated.classic.deprecated + def foo(): + pass + + def use_foo(): + foo() + + with warnings.catch_warnings(record=True) as warns: + warnings.simplefilter("always") + use_foo() + + # Check that the warning path matches the module path + warn = warns[0] + assert warn.filename == __file__ + + # Check that the line number points to the first line inside 'use_foo' + use_foo_lineno = inspect.getsourcelines(use_foo)[1] + assert warn.lineno == use_foo_lineno + 1 + + +def test_extra_stacklevel(): + """ + The unit test utilizes an 'extra_stacklevel' of 1 to ensure that the warning message + accurately identifies the caller of the deprecated function. It verifies that when + the 'use_foo' function is called, the warning message correctly indicates the line + where the call to 'use_foo' is made. + """ + + @deprecated.classic.deprecated(extra_stacklevel=1) + def foo(): + pass + + def use_foo(): + foo() + + def demo(): + use_foo() + + with warnings.catch_warnings(record=True) as warns: + warnings.simplefilter("always") + demo() + + # Check that the warning path matches the module path + warn = warns[0] + assert warn.filename == __file__ + + # Check that the line number points to the first line inside 'demo' + demo_lineno = inspect.getsourcelines(demo)[1] + assert warn.lineno == demo_lineno + 1
[ENH] Stacklevel offset Over at https://github.com/coroa/pandas-indexing/pull/27, I am about to deprecate a [pandas accessor](https://pandas.pydata.org/docs/development/extending.html#registering-custom-accessors), but since these accessors are classes that are instantiated by pandas upon access, the warning is not emitted because the `stacklevel` is too low. # MWE ```python import pandas as pd from deprecated import deprecated @pd.api.extensions.register_dataframe_accessor("idx") @deprecated class IdxAccessor: def __init__(self, pandas_obj): self._obj = pandas_obj df = pd.DataFrame() df.idx ``` will only emit with a ~`warnings.simplefilter("always")`~ `warnings.simplefilter("default")` (edit: changed to include "default", which was pointed out below), since the call stack looks like: ```python Cell In[4], line 11 df.idx File ~/.local/conda/envs/aneris2/lib/python3.10/site-packages/pandas/core/accessor.py:182 in __get__ accessor_obj = self._accessor(obj) File ~/.local/conda/envs/aneris2/lib/python3.10/site-packages/deprecated/classic.py:169 in wrapped_cls warnings.warn(msg, category=self.category, stacklevel=_class_stacklevel) ``` so that the last `stacklevel=2` setting points to the `pandas.core.accessor` module instead of the user code. # Proposal Would you accept a PR to add a `stacklevel_offset` or `stacklevel` argument to deprecated, which would either be added like: ```python warnings.warn(msg, category=self.category, stacklevel=_class_stacklevel + stacklevel_offset) ``` or could be used to replace the default `stacklevel`, like: ```python warnings.warn(msg, category=self.category, stacklevel=_class_stacklevel if stacklevel is None else stacklevel) ```
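A tiny standalone demonstration of the stacklevel arithmetic involved (not the library's code): each extra level moves the reported location one frame further up the stack, past the instrumentation and into user code:

```python
import warnings

def deprecated_thing(extra_stacklevel: int = 0) -> None:
    # stacklevel=2 points at whoever called deprecated_thing(); each
    # extra level attributes the warning one frame further up the stack.
    warnings.warn("deprecated", DeprecationWarning, stacklevel=2 + extra_stacklevel)

def wrapper() -> None:  # stands in for pandas' accessor machinery
    deprecated_thing(extra_stacklevel=1)

warnings.simplefilter("default")
wrapper()  # the warning is reported at this line, not inside wrapper()
```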
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_deprecated.py::test_classic_deprecated_function__warns[classic_deprecated_function7]", "tests/test_deprecated.py::test_classic_deprecated_class__warns[classic_deprecated_class7]", "tests/test_deprecated.py::test_classic_deprecated_method__warns[classic_deprecated_method7]", "tests/test_deprecated.py::test_classic_deprecated_static_method__warns[classic_deprecated_static_method7]", "tests/test_deprecated.py::test_classic_deprecated_class_method__warns[classic_deprecated_class_method7]", "tests/test_deprecated.py::test_extra_stacklevel" ]
[ "tests/test_deprecated.py::test_classic_deprecated_function__warns[None]", "tests/test_deprecated.py::test_classic_deprecated_function__warns[classic_deprecated_function1]", "tests/test_deprecated.py::test_classic_deprecated_function__warns[classic_deprecated_function2]", "tests/test_deprecated.py::test_classic_deprecated_function__warns[classic_deprecated_function3]", "tests/test_deprecated.py::test_classic_deprecated_function__warns[classic_deprecated_function4]", "tests/test_deprecated.py::test_classic_deprecated_function__warns[classic_deprecated_function5]", "tests/test_deprecated.py::test_classic_deprecated_function__warns[classic_deprecated_function6]", "tests/test_deprecated.py::test_classic_deprecated_class__warns[None]", "tests/test_deprecated.py::test_classic_deprecated_class__warns[classic_deprecated_class1]", "tests/test_deprecated.py::test_classic_deprecated_class__warns[classic_deprecated_class2]", "tests/test_deprecated.py::test_classic_deprecated_class__warns[classic_deprecated_class3]", "tests/test_deprecated.py::test_classic_deprecated_class__warns[classic_deprecated_class4]", "tests/test_deprecated.py::test_classic_deprecated_class__warns[classic_deprecated_class5]", "tests/test_deprecated.py::test_classic_deprecated_class__warns[classic_deprecated_class6]", "tests/test_deprecated.py::test_classic_deprecated_method__warns[None]", "tests/test_deprecated.py::test_classic_deprecated_method__warns[classic_deprecated_method1]", "tests/test_deprecated.py::test_classic_deprecated_method__warns[classic_deprecated_method2]", "tests/test_deprecated.py::test_classic_deprecated_method__warns[classic_deprecated_method3]", "tests/test_deprecated.py::test_classic_deprecated_method__warns[classic_deprecated_method4]", "tests/test_deprecated.py::test_classic_deprecated_method__warns[classic_deprecated_method5]", "tests/test_deprecated.py::test_classic_deprecated_method__warns[classic_deprecated_method6]", "tests/test_deprecated.py::test_classic_deprecated_static_method__warns[None]", "tests/test_deprecated.py::test_classic_deprecated_static_method__warns[classic_deprecated_static_method1]", "tests/test_deprecated.py::test_classic_deprecated_static_method__warns[classic_deprecated_static_method2]", "tests/test_deprecated.py::test_classic_deprecated_static_method__warns[classic_deprecated_static_method3]", "tests/test_deprecated.py::test_classic_deprecated_static_method__warns[classic_deprecated_static_method4]", "tests/test_deprecated.py::test_classic_deprecated_static_method__warns[classic_deprecated_static_method5]", "tests/test_deprecated.py::test_classic_deprecated_static_method__warns[classic_deprecated_static_method6]", "tests/test_deprecated.py::test_classic_deprecated_class_method__warns[None]", "tests/test_deprecated.py::test_classic_deprecated_class_method__warns[classic_deprecated_class_method1]", "tests/test_deprecated.py::test_classic_deprecated_class_method__warns[classic_deprecated_class_method2]", "tests/test_deprecated.py::test_classic_deprecated_class_method__warns[classic_deprecated_class_method3]", "tests/test_deprecated.py::test_classic_deprecated_class_method__warns[classic_deprecated_class_method4]", "tests/test_deprecated.py::test_classic_deprecated_class_method__warns[classic_deprecated_class_method5]", "tests/test_deprecated.py::test_classic_deprecated_class_method__warns[classic_deprecated_class_method6]", "tests/test_deprecated.py::test_should_raise_type_error", "tests/test_deprecated.py::test_warning_is_ignored", 
"tests/test_deprecated.py::test_respect_global_filter", "tests/test_deprecated.py::test_default_stacklevel" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-07-08T22:09:05Z"
mit
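For quick reference, the behaviour this record introduces, condensed from `test_extra_stacklevel` in the test patch above; running it requires a build of Deprecated that includes this change:

```python
import inspect
import warnings

import deprecated.classic


@deprecated.classic.deprecated(extra_stacklevel=1)
def foo():
    pass


def use_foo():
    foo()


def demo():
    use_foo()


with warnings.catch_warnings(record=True) as warns:
    warnings.simplefilter("always")
    demo()

# With extra_stacklevel=1 the warning is attributed one frame higher than the
# default: to the line inside demo() that calls use_foo(), which is exactly
# what the pandas-accessor use case in the problem statement needs.
demo_lineno = inspect.getsourcelines(demo)[1]
assert warns[0].filename == __file__
assert warns[0].lineno == demo_lineno + 1
```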
tarioch__xirr-15
diff --git a/src/xirr/math.py b/src/xirr/math.py index 92ca4de..14bf286 100644 --- a/src/xirr/math.py +++ b/src/xirr/math.py @@ -50,7 +50,7 @@ def xirr(valuesPerDate): try: result = scipy.optimize.newton(lambda r: xnpv(valuesPerDate, r), 0) except (RuntimeError, OverflowError): # Failed to converge? - result = scipy.optimize.brentq(lambda r: xnpv(valuesPerDate, r), -0.999999999999999, 1e20) + result = scipy.optimize.brentq(lambda r: xnpv(valuesPerDate, r), -0.999999999999999, 1e20, maxiter=10**6) if not isinstance(result, complex): return result
tarioch/xirr
9d046ac2db139ed311fc146ff1a7feae22e549d1
diff --git a/tests/test_math.py b/tests/test_math.py index 0c78f3f..48b0a1f 100644 --- a/tests/test_math.py +++ b/tests/test_math.py @@ -21,6 +21,18 @@ from xirr.math import xirr, cleanXirr, xnpv ({'2011-01-01': 1, '2011-01-02': 0, '2012-01-01': 1}, float("inf")), ({'2011-07-01': -10000, '2014-07-01': 1}, -0.9535), ({'2011-07-01': 10000, '2014-07-01': -1}, -0.9535), + ({ + '2016-04-06': 18902.0, + '2016-05-04': 83600.0, + '2016-05-12': -5780.0, + '2017-05-08': -4080.0, + '2017-07-03': -56780.0, + '2018-05-07': -2210.0, + '2019-05-06': -2380.0, + '2019-10-01': 33975.0, + '2020-03-13': 23067.98, + '2020-05-07': -1619.57, + }, -1), ]) def test_xirr(valuesPerDateString, expected): valuesPerDate = {datetime.fromisoformat(k).date(): v for k, v in valuesPerDateString.items()}
Problematic example Here's an interesting example of numbers: Values: ``` [5046.0, 5037.299999999999, 4995.25, 5795.5, -1085.6, 4998.0, 4557.8, 4815.0, 4928.0, -2197.05, 5424.0, -2565.0, -2872.8, 10085.0, 9500.0, 9976.8, 14880.000000000002, -6094.7, 19522.359999999997, 18035.0, 10477.44] ``` Dates: ``` [Timestamp('2015-08-03 00:00:00+0000', tz='UTC'), Timestamp('2015-10-20 00:00:00+0000', tz='UTC'), Timestamp('2016-01-11 00:00:00+0000', tz='UTC'), Timestamp('2016-04-06 00:00:00+0000', tz='UTC'), Timestamp('2016-04-26 00:00:00+0000', tz='UTC'), Timestamp('2016-07-19 00:00:00+0000', tz='UTC'), Timestamp('2016-10-11 00:00:00+0000', tz='UTC'), Timestamp('2017-01-11 00:00:00+0000', tz='UTC'), Timestamp('2017-04-11 00:00:00+0000', tz='UTC'), Timestamp('2017-04-25 00:00:00+0000', tz='UTC'), Timestamp('2017-10-12 00:00:00+0000', tz='UTC'), Timestamp('2018-04-24 00:00:00+0000', tz='UTC'), Timestamp('2019-04-23 00:00:00+0000', tz='UTC'), Timestamp('2020-02-25 00:00:00+0000', tz='UTC'), Timestamp('2020-03-03 00:00:00+0000', tz='UTC'), Timestamp('2020-03-09 00:00:00+0000', tz='UTC'), Timestamp('2020-04-06 00:00:00+0000', tz='UTC'), Timestamp('2020-04-23 00:00:00+0000', tz='UTC'), Timestamp('2020-06-05 00:00:00+0000', tz='UTC'), Timestamp('2020-08-05 00:00:00+0000', tz='UTC'), Timestamp('2020-08-19 00:00:00+0000', tz='UTC')] ``` It produces an exception in xirr: `ValueError: f(a) and f(b) must have different signs` It also produces an error in Google Sheets. Excel, however, produces a result: ![image](https://user-images.githubusercontent.com/8012482/92327892-26b14d00-f05d-11ea-9bf9-d102c3ed394f.png) Any idea what is going on here?
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_math.py::test_xirr[valuesPerDateString15--1]" ]
[ "tests/test_math.py::test_xirr[valuesPerDateString0--0.6454]", "tests/test_math.py::test_xirr[valuesPerDateString1--0.6454]", "tests/test_math.py::test_xirr[valuesPerDateString2--0.6581]", "tests/test_math.py::test_xirr[valuesPerDateString3-None]", "tests/test_math.py::test_xirr[valuesPerDateString4--inf]", "tests/test_math.py::test_xirr[valuesPerDateString5-inf]", "tests/test_math.py::test_xirr[valuesPerDateString6-0.0]", "tests/test_math.py::test_xirr[valuesPerDateString7-0.0]", "tests/test_math.py::test_xirr[valuesPerDateString8-412461.6383]", "tests/test_math.py::test_xirr[valuesPerDateString9-1.2238535289956518e+16]", "tests/test_math.py::test_xirr[valuesPerDateString10--0.8037]", "tests/test_math.py::test_xirr[valuesPerDateString11--inf]", "tests/test_math.py::test_xirr[valuesPerDateString12-inf]", "tests/test_math.py::test_xirr[valuesPerDateString13--0.9535]", "tests/test_math.py::test_xirr[valuesPerDateString14--0.9535]", "tests/test_math.py::test_cleanXirr[valuesPerDateString0--0.6454]", "tests/test_math.py::test_cleanXirr[valuesPerDateString1--0.6454]", "tests/test_math.py::test_cleanXirr[valuesPerDateString2--0.6581]", "tests/test_math.py::test_cleanXirr[valuesPerDateString3-None]", "tests/test_math.py::test_cleanXirr[valuesPerDateString4-None]", "tests/test_math.py::test_cleanXirr[valuesPerDateString5-None]", "tests/test_math.py::test_cleanXirr[valuesPerDateString6-None]", "tests/test_math.py::test_cleanXirr[valuesPerDateString7--0.8037]", "tests/test_math.py::test_cleanXirr[valuesPerDateString8-None]", "tests/test_math.py::test_xnpv[valuesPerDateString0--1.0-inf]", "tests/test_math.py::test_xnpv[valuesPerDateString1--0.1-22.2575]" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media" ], "has_test_patch": true, "is_lite": false }
"2020-09-15T16:30:22Z"
mit
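The one-line fix above only raises `brentq`'s iteration budget; the surrounding newton-then-brentq strategy is unchanged. A self-contained sketch of that strategy on the cashflow from the new test case (`xnpv` is re-derived here from xirr's annual day-count convention, so treat it as an approximation of the library's internals):

```python
from datetime import date

import scipy.optimize

# Cashflow from the new test case; the expected XIRR is approximately -1.
values_per_date = {
    date(2016, 4, 6): 18902.0, date(2016, 5, 4): 83600.0,
    date(2016, 5, 12): -5780.0, date(2017, 5, 8): -4080.0,
    date(2017, 7, 3): -56780.0, date(2018, 5, 7): -2210.0,
    date(2019, 5, 6): -2380.0, date(2019, 10, 1): 33975.0,
    date(2020, 3, 13): 23067.98, date(2020, 5, 7): -1619.57,
}
d0 = min(values_per_date)


def xnpv(rate):
    # Net present value with day-count fractions measured in years.
    return sum(v / (1.0 + rate) ** ((d - d0).days / 365.0)
               for d, v in values_per_date.items())


try:
    result = scipy.optimize.newton(xnpv, 0)
except (RuntimeError, OverflowError):  # Newton failed to converge
    # With the default maxiter=100, brentq gives up on this extremely skewed
    # bracket; maxiter=10**6 (the change above) lets it reach the root.
    result = scipy.optimize.brentq(
        xnpv, -0.999999999999999, 1e20, maxiter=10**6
    )

print(round(result, 4))
```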
tarohi24__typedflow-16
diff --git a/typedflow/typedflow.py b/typedflow/typedflow.py index 8185bd6..b6dc426 100644 --- a/typedflow/typedflow.py +++ b/typedflow/typedflow.py @@ -19,6 +19,10 @@ T = TypeVar('T') # serializable K = TypeVar('K') # serializable +class BatchIsEmpty(Exception): + pass + + @dataclass class Batch(Generic[T]): batch_id: int @@ -31,8 +35,18 @@ class Task(Generic[T, K]): def process(self, batch: Batch[T]) -> Batch[K]: - lst: List[K] = [self.func(item) for item in batch.data] - return Batch(batch_id=batch.batch_id, data=lst) + products: List[K] = [] + for item in batch.data: + try: + products.append(self.func(item)) + except Exception as e: + logger.warn(repr(e)) + continue + if len(products) > 0: + return Batch[K](batch_id=batch.batch_id, + data=products) + else: + raise BatchIsEmpty() @dataclass @@ -97,7 +111,6 @@ class Pipeline: for batch in self.loader.load(): try: product: Batch = _run(batch, self.pipeline) - except Exception as e: - logger.warn(repr(e)) + except BatchIsEmpty: continue self.dumper.dump(product)
tarohi24/typedflow
885708ac898ab55f2cd467b54695ccf4c468edc8
diff --git a/typedflow/tests/typedflow/test_task.py b/typedflow/tests/typedflow/test_task.py index 3294062..93e6463 100644 --- a/typedflow/tests/typedflow/test_task.py +++ b/typedflow/tests/typedflow/test_task.py @@ -77,7 +77,7 @@ def test_process(pl, capsys): def test_except_batch(invalid_pl, capsys): invalid_pl.run() out, _ = capsys.readouterr() - assert out == '' + assert out == '15\n12\n' def test_multibatch_process(mutlibatch_pl, capsys):
A batch doesn't flow if any item in it has an error, even if the majority of items are fine.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "typedflow/tests/typedflow/test_task.py::test_except_batch" ]
[ "typedflow/tests/typedflow/test_task.py::test_process", "typedflow/tests/typedflow/test_task.py::test_multibatch_process", "typedflow/tests/typedflow/test_task.py::test_multibatch_ids" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2019-11-02T10:00:52Z"
mit
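The essence of the patch above, as a self-contained sketch: exception handling moves from the whole batch down to individual items, so one failing item is logged and skipped instead of sinking the rest of the batch (condensed from the patched `Task.process`):

```python
import logging
from dataclasses import dataclass
from typing import Callable, Generic, List, TypeVar

logger = logging.getLogger(__name__)
T = TypeVar("T")
K = TypeVar("K")


class BatchIsEmpty(Exception):
    """Raised when every item in a batch failed; the pipeline skips it."""


@dataclass
class Batch(Generic[T]):
    batch_id: int
    data: List[T]


@dataclass
class Task(Generic[T, K]):
    func: Callable[[T], K]

    def process(self, batch: Batch[T]) -> Batch[K]:
        products: List[K] = []
        for item in batch.data:
            try:
                products.append(self.func(item))
            except Exception as e:
                # Log and skip the bad item; the rest of the batch survives.
                logger.warning(repr(e))
        if not products:
            raise BatchIsEmpty()
        return Batch(batch_id=batch.batch_id, data=products)


task = Task(func=lambda x: 10 // x)  # raises ZeroDivisionError for x == 0
print(task.process(Batch(batch_id=0, data=[2, 0, 5])).data)  # [5, 2]
```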
tarohi24__typedflow-68
diff --git a/typedflow/nodes/base.py b/typedflow/nodes/base.py index ece0895..b9853f9 100644 --- a/typedflow/nodes/base.py +++ b/typedflow/nodes/base.py @@ -113,7 +113,8 @@ class ConsumerNode: None """ assert len(self.precs) == 0, 'Some arguments have been already set' - self.precs: Dict[str, ProviderNode] = args + for name, prec in args.items(): + self.set_upstream_node(name, prec) return self
tarohi24/typedflow
2127e74314d2b97d596cfc12ed8fb257bb688d6f
diff --git a/typedflow/tests/flow/test_flow.py b/typedflow/tests/flow/test_flow.py index aa31917..7682475 100644 --- a/typedflow/tests/flow/test_flow.py +++ b/typedflow/tests/flow/test_flow.py @@ -209,3 +209,4 @@ def test_declare_inputs_when_definition_with_multiple_args(): node_dump = DumpNode(dump)({'a': node_task}) flow = Flow([node_dump, ]) flow.typecheck() + assert node_task.cache_table.life == 1
The new syntax doesn't work. It doesn't accept args in the correct way. For instance, the life of cache tables is never incremented.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "typedflow/tests/flow/test_flow.py::test_declare_inputs_when_definition_with_multiple_args" ]
[ "typedflow/tests/flow/test_flow.py::test_flow_run", "typedflow/tests/flow/test_flow.py::test_typecheck_success", "typedflow/tests/flow/test_flow.py::test_typecheck_failure", "typedflow/tests/flow/test_flow.py::test_incoming_multiple_node", "typedflow/tests/flow/test_flow.py::test_arg_inheritance", "typedflow/tests/flow/test_flow.py::test_declare_inputs_when_definition" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2019-12-10T15:26:34Z"
mit
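A stripped-down model of the one-hunk fix above: registering each upstream node through `set_upstream_node()` instead of assigning the whole dict keeps the per-connection side effects, such as incrementing the provider's cache-table life (the `CacheTable` here is a deliberately simplified stand-in for typedflow's internals):

```python
from typing import Dict


class CacheTable:
    def __init__(self) -> None:
        self.life = 0


class ProviderNode:
    def __init__(self) -> None:
        self.cache_table = CacheTable()


class ConsumerNode:
    def __init__(self) -> None:
        self.precs: Dict[str, ProviderNode] = {}

    def set_upstream_node(self, name: str, prec: ProviderNode) -> None:
        self.precs[name] = prec
        prec.cache_table.life += 1  # the side effect the bulk assignment skipped

    def __call__(self, args: Dict[str, ProviderNode]) -> "ConsumerNode":
        assert len(self.precs) == 0, "Some arguments have been already set"
        for name, prec in args.items():
            self.set_upstream_node(name, prec)
        return self


node_task = ProviderNode()
node_dump = ConsumerNode()({"a": node_task})
print(node_task.cache_table.life)  # 1, as the updated test asserts
```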
taverntesting__tavern-851
diff --git a/tavern/_core/exceptions.py b/tavern/_core/exceptions.py index d8c78b6..803abe0 100644 --- a/tavern/_core/exceptions.py +++ b/tavern/_core/exceptions.py @@ -114,7 +114,7 @@ class InvalidFormattedJsonError(TavernException): """Tried to use the magic json format tag in an invalid way""" -class InvalidExtBlockException(TavernException): +class MisplacedExtBlockException(TavernException): """Tried to use the '$ext' block in a place it is no longer valid to use it""" def __init__(self, block) -> None: diff --git a/tavern/_core/extfunctions.py b/tavern/_core/extfunctions.py index c7b3325..e7b534e 100644 --- a/tavern/_core/extfunctions.py +++ b/tavern/_core/extfunctions.py @@ -123,6 +123,11 @@ def get_wrapped_create_function(ext: Mapping): def _get_ext_values(ext: Mapping): + if not isinstance(ext, Mapping): + raise exceptions.InvalidExtFunctionError( + "ext block should be a dict, but it was a {}".format(type(ext)) + ) + args = ext.get("extra_args") or () kwargs = ext.get("extra_kwargs") or {} try: @@ -145,14 +150,23 @@ def update_from_ext(request_args: dict, keys_to_check: List[str]) -> None: """ new_args = {} + logger = _getlogger() for key in keys_to_check: try: - func = get_wrapped_create_function(request_args[key].pop("$ext")) - except (KeyError, TypeError, AttributeError): - pass - else: - new_args[key] = func() + block = request_args[key] + except KeyError: + logger.debug("No %s block", key) + continue + + try: + pop = block.pop("$ext") + except (KeyError, AttributeError, TypeError): + logger.debug("No ext functions in %s block", key) + continue + + func = get_wrapped_create_function(pop) + new_args[key] = func() merged_args = deep_dict_merge(request_args, new_args) diff --git a/tavern/_plugins/mqtt/request.py b/tavern/_plugins/mqtt/request.py index db7eae7..0a9de87 100644 --- a/tavern/_plugins/mqtt/request.py +++ b/tavern/_plugins/mqtt/request.py @@ -1,7 +1,7 @@ import functools import json import logging -from typing import Mapping +from typing import Dict from box.box import Box @@ -16,21 +16,19 @@ from tavern.request import BaseRequest logger = logging.getLogger(__name__) -def get_publish_args(rspec: Mapping, test_block_config: TestConfig) -> dict: - """Format mqtt request args - - Todo: - Anything else to do here? - """ +def get_publish_args(rspec: Dict, test_block_config: TestConfig) -> dict: + """Format mqtt request args and update using ext functions""" fspec = format_keys(rspec, test_block_config.variables) - if "json" in rspec: - if "payload" in rspec: + if "json" in fspec: + if "payload" in fspec: raise exceptions.BadSchemaError( "Can only specify one of 'payload' or 'json' in MQTT request" ) + update_from_ext(fspec, ["json"]) + fspec["payload"] = json.dumps(fspec.pop("json")) return fspec @@ -43,15 +41,15 @@ class MQTTRequest(BaseRequest): """ def __init__( - self, client: MQTTClient, rspec: Mapping, test_block_config: TestConfig + self, client: MQTTClient, rspec: Dict, test_block_config: TestConfig ) -> None: expected = {"topic", "payload", "json", "qos", "retain"} check_expected_keys(expected, rspec) publish_args = get_publish_args(rspec, test_block_config) - update_from_ext(publish_args, ["json"]) + self._publish_args = publish_args self._prepared = functools.partial(client.publish, **publish_args) # Need to do this here because get_publish_args will modify the original diff --git a/tavern/_plugins/mqtt/response.py b/tavern/_plugins/mqtt/response.py index a2e362b..73513f8 100644 --- a/tavern/_plugins/mqtt/response.py +++ b/tavern/_plugins/mqtt/response.py @@ -335,7 +335,7 @@ class _MessageVerifier: json_payload = True if payload.pop("$ext", None): - raise exceptions.InvalidExtBlockException( + raise exceptions.MisplacedExtBlockException( "json", ) elif "payload" in expected: diff --git a/tavern/_plugins/rest/response.py b/tavern/_plugins/rest/response.py index 97bc494..ca54b11 100644 --- a/tavern/_plugins/rest/response.py +++ b/tavern/_plugins/rest/response.py @@ -218,7 +218,7 @@ class RestResponse(BaseResponse): if isinstance(expected_block, dict): if expected_block.pop("$ext", None): - raise exceptions.InvalidExtBlockException( + raise exceptions.MisplacedExtBlockException( blockname, ) diff --git a/tavern/response.py b/tavern/response.py index f9eba9f..8bee7a6 100644 --- a/tavern/response.py +++ b/tavern/response.py @@ -139,7 +139,7 @@ class BaseResponse: if isinstance(block, dict): check_ext_functions(block.get("$ext", None)) if nfuncs != len(self.validate_functions): - raise exceptions.InvalidExtBlockException( + raise exceptions.MisplacedExtBlockException( name, )
taverntesting/tavern
7e624698ad534342bfc302bb1216eeb5e214b240
diff --git a/example/mqtt/test_mqtt.tavern.yaml b/example/mqtt/test_mqtt.tavern.yaml index 145d3b6..956a18a 100644 --- a/example/mqtt/test_mqtt.tavern.yaml +++ b/example/mqtt/test_mqtt.tavern.yaml @@ -740,3 +740,28 @@ stages: payload: "there" timeout: 5 qos: 1 + +--- + +test_name: Update an MQTT publish from an ext function + +includes: + - !include common.yaml + +paho-mqtt: *mqtt_spec + +stages: + - *setup_device_for_test + + - name: step 1 - ping/pong + mqtt_publish: + topic: /device/{random_device_id}/echo + json: + $ext: + function: testing_utils:return_hello + mqtt_response: + topic: /device/{random_device_id}/echo/response + timeout: 3 + qos: 1 + json: + hello: there diff --git a/example/mqtt/testing_utils.py b/example/mqtt/testing_utils.py index 70021ab..f483ca9 100644 --- a/example/mqtt/testing_utils.py +++ b/example/mqtt/testing_utils.py @@ -3,5 +3,5 @@ def message_says_hello(msg): assert msg.payload.get("message") == "hello world" -def return_hello(_): +def return_hello(_=None): return {"hello": "there"} diff --git a/tests/unit/test_mqtt.py b/tests/unit/test_mqtt.py index b9603da..1d55b06 100644 --- a/tests/unit/test_mqtt.py +++ b/tests/unit/test_mqtt.py @@ -1,3 +1,4 @@ +from typing import Dict from unittest.mock import MagicMock, Mock, patch import paho.mqtt.client as paho @@ -18,18 +19,19 @@ def test_host_required(): MQTTClient(**args) -class TestClient: - @pytest.fixture(name="fake_client") - def fix_fake_client(self): - args = {"connect": {"host": "localhost"}} [email protected](name="fake_client") +def fix_fake_client(): + args = {"connect": {"host": "localhost"}} + + mqtt_client = MQTTClient(**args) - mqtt_client = MQTTClient(**args) + mqtt_client._subscribed[2] = _Subscription("abc") + mqtt_client._subscription_mappings["abc"] = 2 - mqtt_client._subscribed[2] = _Subscription("abc") - mqtt_client._subscription_mappings["abc"] = 2 + return mqtt_client - return mqtt_client +class TestClient: def test_no_queue(self, fake_client): """Trying to fetch from a nonexistent queue raised exception""" @@ -192,3 +194,33 @@ class TestSubscription: MQTTClient._on_subscribe(mock_client, "abc", {}, 123, 0) assert mock_client._subscribed == {} + + +class TestExtFunctions: + @pytest.fixture() + def basic_mqtt_request_args(self) -> Dict: + return { + "topic": "/a/b/c", + } + + def test_basic(self, fake_client, basic_mqtt_request_args, includes): + MQTTRequest(fake_client, basic_mqtt_request_args, includes) + + def test_ext_function_bad(self, fake_client, basic_mqtt_request_args, includes): + basic_mqtt_request_args["json"] = {"$ext": "kk"} + + with pytest.raises(exceptions.InvalidExtFunctionError): + MQTTRequest(fake_client, basic_mqtt_request_args, includes) + + def test_ext_function_good(self, fake_client, basic_mqtt_request_args, includes): + basic_mqtt_request_args["json"] = { + "$ext": { + "function": "operator:add", + "extra_args": (1, 2), + } + } + + m = MQTTRequest(fake_client, basic_mqtt_request_args, includes) + + assert "payload" in m._publish_args + assert m._publish_args["payload"] == "3"
Unable to use external function in MQTT publish I tried using an external function in a MQTT publish request, and the function wasn't getting evaluated. My request looks like this: ``` - id: publish_thing_1 name: Publish thing 1 mqtt_publish: topic: &ping_topic '/device/123/ping' qos: 1 json: $ext: function: utils.testing_utils:my_function thing_1: abc mqtt_response: topic: *ping_topic json: header: test thing_1: abc timeout: 5 qos: 1 ``` It looks like the line below in [request.py](https://github.com/taverntesting/tavern/blob/master/tavern/_plugins/mqtt/request.py) ``` update_from_ext(publish_args, ["json"], test_block_config) ``` should be ``` update_from_ext(publish_args, ["payload"], test_block_config) ``` instead, since publish_args looks like this: ``` {'topic': '/device/123/ping', 'qos': 1, 'payload': '{"$ext": {"function": "utils.testing_utils:my_function"}, "thing_1": "abc"}'} ``` Note that the `payload` value is a string, which prevents external function evaluation even after I do the `update_from_ext` change. Before I go down the rabbit hole too much, I wanted to confirm that I've configured the request properly, and that this feature is expected to work after this [PR](https://github.com/taverntesting/tavern/pull/620).
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/unit/test_mqtt.py::TestExtFunctions::test_ext_function_bad", "tests/unit/test_mqtt.py::TestExtFunctions::test_ext_function_good" ]
[ "tests/unit/test_mqtt.py::test_host_required", "tests/unit/test_mqtt.py::TestClient::test_no_queue", "tests/unit/test_mqtt.py::TestClient::test_no_message", "tests/unit/test_mqtt.py::TestClient::test_message_queued", "tests/unit/test_mqtt.py::TestClient::test_context_connection_failure", "tests/unit/test_mqtt.py::TestClient::test_context_connection_success", "tests/unit/test_mqtt.py::TestClient::test_assert_message_published", "tests/unit/test_mqtt.py::TestClient::test_assert_message_published_unknown_err", "tests/unit/test_mqtt.py::TestTLS::test_missing_cert_gives_error", "tests/unit/test_mqtt.py::TestTLS::test_disabled_tls", "tests/unit/test_mqtt.py::TestTLS::test_invalid_tls_ver", "tests/unit/test_mqtt.py::TestRequests::test_unknown_fields", "tests/unit/test_mqtt.py::TestRequests::test_missing_format", "tests/unit/test_mqtt.py::TestRequests::test_correct_format", "tests/unit/test_mqtt.py::TestSubscription::test_handles_subscriptions", "tests/unit/test_mqtt.py::TestSubscription::test_no_subscribe_on_err", "tests/unit/test_mqtt.py::TestSubscription::test_no_subscribe_on_unrecognised_suback", "tests/unit/test_mqtt.py::TestExtFunctions::test_basic" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-02-16T12:53:45Z"
mit
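The heart of the fix above is ordering: the `$ext` block inside a `json` key must be resolved before `json.dumps` freezes the block into the opaque MQTT `payload` string. A rough sketch of that ordering (`make_ext_caller` and this `get_publish_args` are simplified stand-ins for tavern's internals, not its actual API):

```python
import importlib
import json


def make_ext_caller(ext):
    # Simplified resolution of tavern's "package.module:function" convention,
    # with optional extra_args / extra_kwargs.
    module_name, func_name = ext["function"].split(":")
    func = getattr(importlib.import_module(module_name), func_name)
    args = ext.get("extra_args") or ()
    kwargs = ext.get("extra_kwargs") or {}
    return lambda: func(*args, **kwargs)


def get_publish_args(rspec):
    if "json" in rspec:
        # Resolve "$ext" first, *then* serialise: once json.dumps has run,
        # the "$ext" block is just text inside the payload string and can
        # never be evaluated, which is the bug in the problem statement above.
        ext = rspec["json"].pop("$ext", None)
        if ext is not None:
            # tavern deep-merges the result into the rest of the block; with
            # nothing else in the block, the result simply replaces it.
            rspec["json"] = make_ext_caller(ext)()
        rspec["payload"] = json.dumps(rspec.pop("json"))
    return rspec


print(get_publish_args({
    "topic": "/a/b/c",
    "json": {"$ext": {"function": "operator:add", "extra_args": (1, 2)}},
}))  # {'topic': '/a/b/c', 'payload': '3'}, matching the new unit test
```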
taxprofiler__taxpasta-132
diff --git a/CHANGELOG.md b/CHANGELOG.md index d4c4a92..0678f92 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 `merge` commands, which inserts a new column `rank_lineage` to results that contains semi-colon-separated strings with the ranks (#130). +### Changed + +- Reversed the order of lineages printed to output files (#131). + ## [0.4.1] - (2023-07-13) ### Fixed diff --git a/src/taxpasta/infrastructure/domain/service/taxopy_taxonomy_service.py b/src/taxpasta/infrastructure/domain/service/taxopy_taxonomy_service.py index 9635340..4141e9f 100644 --- a/src/taxpasta/infrastructure/domain/service/taxopy_taxonomy_service.py +++ b/src/taxpasta/infrastructure/domain/service/taxopy_taxonomy_service.py @@ -73,7 +73,7 @@ class TaxopyTaxonomyService(TaxonomyService): taxon = taxopy.Taxon(taxid=taxonomy_id, taxdb=self._tax_db) except TaxidError: return None - return taxon.name_lineage + return list(reversed(taxon.name_lineage)) def get_taxon_identifier_lineage(self, taxonomy_id: int) -> Optional[List[int]]: """Return the lineage of a given taxonomy identifier as identifiers.""" @@ -81,7 +81,7 @@ class TaxopyTaxonomyService(TaxonomyService): taxon = taxopy.Taxon(taxid=taxonomy_id, taxdb=self._tax_db) except TaxidError: return None - return taxon.taxid_lineage + return list(reversed(taxon.taxid_lineage)) def get_taxon_rank_lineage(self, taxonomy_id: int) -> Optional[List[str]]: """Return the lineage of a given taxonomy identifier as ranks.""" @@ -89,7 +89,7 @@ class TaxopyTaxonomyService(TaxonomyService): taxon = taxopy.Taxon(taxid=taxonomy_id, taxdb=self._tax_db) except TaxidError: return None - return list(taxon.rank_name_dictionary.keys()) + return list(reversed(taxon.rank_name_dictionary.keys())) def add_name(self, table: DataFrame[ResultTable]) -> DataFrame[ResultTable]: """Add a column for the taxon name to the given table.""" @@ -123,11 +123,10 @@ class TaxopyTaxonomyService(TaxonomyService): def _name_lineage_as_str(self, taxonomy_id: int) -> Optional[str]: """Return the lineage of a taxon as concatenated names.""" - try: - taxon = taxopy.Taxon(taxid=taxonomy_id, taxdb=self._tax_db) - except TaxidError: + if lineage := self.get_taxon_name_lineage(taxonomy_id): + return ";".join(lineage) + else: return None - return ";".join(taxon.name_lineage) def add_identifier_lineage( self, table: DataFrame[ResultTable] @@ -143,11 +142,10 @@ class TaxopyTaxonomyService(TaxonomyService): def _taxid_lineage_as_str(self, taxonomy_id: int) -> Optional[str]: """Return the lineage of a taxon as concatenated identifiers.""" - try: - taxon = taxopy.Taxon(taxid=taxonomy_id, taxdb=self._tax_db) - except TaxidError: + if lineage := self.get_taxon_identifier_lineage(taxonomy_id): + return ";".join(str(tax_id) for tax_id in lineage) + else: return None - return ";".join([str(tax_id) for tax_id in taxon.taxid_lineage]) def add_rank_lineage(self, table: DataFrame[ResultTable]) -> DataFrame[ResultTable]: """Add a column for the taxon lineage as ranks to the given table.""" @@ -161,11 +159,10 @@ class TaxopyTaxonomyService(TaxonomyService): def _rank_lineage_as_str(self, taxonomy_id: int) -> Optional[str]: """Return the rank lineage of a taxon as concatenated identifiers.""" - try: - taxon = taxopy.Taxon(taxid=taxonomy_id, taxdb=self._tax_db) - except TaxidError: + if lineage := self.get_taxon_rank_lineage(taxonomy_id): + return ";".join(lineage) + else: return None - return ";".join(taxon.rank_name_dictionary.keys()) def summarise_at( self, profile: DataFrame[StandardProfile], rank: str
taxprofiler/taxpasta
d3d03bfe87876b8fe81bc4b9a80775bb5250ec94
diff --git a/tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py b/tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py index 5109384..1c3e2ab 100644 --- a/tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py +++ b/tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py @@ -82,14 +82,14 @@ def test_get_taxon_rank(tax_service: TaxopyTaxonomyService, tax_id: int, expecte ( 86398254, [ - "Pseudomonadales", - "Gammaproteobacteria", - "Proteobacteria", - "Bacteria", "root", + "Bacteria", + "Proteobacteria", + "Gammaproteobacteria", + "Pseudomonadales", ], ), - (1199096325, ["Saccharomycetes", "Ascomycota", "Eukaryota", "root"]), + (1199096325, ["root", "Eukaryota", "Ascomycota", "Saccharomycetes"]), ], ) def test_get_taxon_name_lineage( @@ -104,8 +104,8 @@ def test_get_taxon_name_lineage( [ (1, [1]), (42, None), - (86398254, [86398254, 329474883, 1641076285, 609216830, 1]), - (1199096325, [1199096325, 432158898, 476817098, 1]), + (86398254, [1, 609216830, 1641076285, 329474883, 86398254]), + (1199096325, [1, 476817098, 432158898, 1199096325]), ], ) def test_get_taxon_identifier_lineage( @@ -120,8 +120,8 @@ def test_get_taxon_identifier_lineage( [ (1, []), (42, None), - (86398254, ["order", "class", "phylum", "superkingdom"]), - (1199096325, ["class", "phylum", "superkingdom"]), + (86398254, ["superkingdom", "phylum", "class", "order"]), + (1199096325, ["superkingdom", "phylum", "class"]), ], ) def test_get_taxon_rank_lineage( @@ -145,9 +145,9 @@ def test_get_taxon_rank_lineage( [ "root", None, - "Pseudomonadales;Gammaproteobacteria;Proteobacteria;" - "Bacteria;root", - "Saccharomycetes;Ascomycota;Eukaryota;root", + "root;Bacteria;Proteobacteria;Gammaproteobacteria;" + "Pseudomonadales", + "root;Eukaryota;Ascomycota;Saccharomycetes", ], ), ] @@ -177,8 +177,8 @@ def test_add_name_lineage( [ "1", None, - "86398254;329474883;1641076285;609216830;1", - "1199096325;432158898;476817098;1", + "1;609216830;1641076285;329474883;86398254", + "1;476817098;432158898;1199096325", ], ), ] @@ -206,10 +206,10 @@ def test_add_identifier_lineage( ( "rank_lineage", [ - "", None, - "order;class;phylum;superkingdom", - "class;phylum;superkingdom", + None, + "superkingdom;phylum;class;order", + "superkingdom;phylum;class", ], ), ]
[Feature] Reverse the order of the current lineages ### Checklist - [X] There are [no similar issues or pull requests](https://github.com/taxprofiler/taxpasta/issues) for this yet. ### Problem Lineages are currently printed in order from taxon of interest to higher ranks (until root). This is in contrast to all other tools which print lineages from highest rank (superkingdom) to most specific. ### Solution Reverse the order of lineages. ### Alternatives One can find arguments for keeping either order but changing it seems to be the _de facto_ standard in the field. ### Anything else? _No response_
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name_lineage[86398254-expected2]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name_lineage[1199096325-expected3]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_identifier_lineage[86398254-expected2]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_identifier_lineage[1199096325-expected3]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank_lineage[86398254-expected2]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank_lineage[1199096325-expected3]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_add_name_lineage[result0-expected0]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_add_identifier_lineage[result0-expected0]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_add_rank_lineage[result0-expected0]" ]
[ "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name[1-root]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name[42-None]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name[86398254-Pseudomonadales]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name[432158898-Ascomycota]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name[492356122-Saccharomyces", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name[1945799576-Escherichia", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name[1887621118-Pseudomonas", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[1-no", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[42-None]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[476817098-superkingdom]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[432158898-phylum]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[329474883-class]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[86398254-order]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[87250111-family]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[933264868-genus]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[1887621118-species]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name_lineage[1-expected0]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name_lineage[42-None]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_identifier_lineage[1-expected0]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_identifier_lineage[42-None]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank_lineage[1-expected0]", "tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank_lineage[42-None]" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-08-22T13:49:24Z"
apache-2.0
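The behavioural change in the record above is purely about ordering: taxopy reports lineages leaf-to-root, and the patch reverses them so output reads root-to-leaf like other profilers. A tiny sketch using the lineage from the updated tests:

```python
from typing import List, Optional


def name_lineage_as_str(name_lineage: Optional[List[str]]) -> Optional[str]:
    # Reverse before joining so the most general rank comes first.
    if name_lineage:
        return ";".join(reversed(name_lineage))
    return None


# What taxopy's Taxon.name_lineage yields for the Pseudomonadales test taxon,
# leaf first:
leaf_first = ["Pseudomonadales", "Gammaproteobacteria", "Proteobacteria",
              "Bacteria", "root"]
print(name_lineage_as_str(leaf_first))
# root;Bacteria;Proteobacteria;Gammaproteobacteria;Pseudomonadales
```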
tcalmant__ipopo-120
diff --git a/pelix/ipopo/handlers/requiresvarfilter.py b/pelix/ipopo/handlers/requiresvarfilter.py index a9895e0..133f51b 100644 --- a/pelix/ipopo/handlers/requiresvarfilter.py +++ b/pelix/ipopo/handlers/requiresvarfilter.py @@ -239,6 +239,9 @@ class _VariableFilterMixIn: self.stop() self.start() + # Force bindings update + self._ipopo_instance.update_bindings() + for svc_ref in self.get_bindings(): # Check if the current reference matches the filter if not self.requirement.filter.matches(
tcalmant/ipopo
1d0add361ca219da8fdf72bb9ba8cb0ade01ad2f
diff --git a/tests/ipopo/issue_119_bundle.py b/tests/ipopo/issue_119_bundle.py new file mode 100644 index 0000000..669ba69 --- /dev/null +++ b/tests/ipopo/issue_119_bundle.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# -- Content-Encoding: UTF-8 -- +""" +Issue 119 (late binding issue on RequiresVarFilter) use case +""" + +from pelix.ipopo.decorators import ( + ComponentFactory, + Provides, + Property, + Requires, + RequiresVarFilter, +) + + +@ComponentFactory("provider-factory") +@Property("providing", "providing", None) +@Provides("required-service") +class Provider: + def __init__(self): + self.providing = None + + +@ComponentFactory("varservice-factory") +@Property("search", "search") +@RequiresVarFilter( + "depends", "required-service", spec_filter="(prop={search})" +) +class VarcConsumer: + def __init__(self): + self.depends = None + self.search = None diff --git a/tests/ipopo/test_requires_varfilter.py b/tests/ipopo/test_requires_varfilter.py index 7ed07ad..e54f16d 100644 --- a/tests/ipopo/test_requires_varfilter.py +++ b/tests/ipopo/test_requires_varfilter.py @@ -9,6 +9,9 @@ Tests the iPOPO @RequiresVarFilter decorator. # Standard library import random import string + +from pelix.ipopo.instance import StoredInstance + try: import unittest2 as unittest except ImportError: @@ -38,6 +41,7 @@ class RequiresVarFilterTest(unittest.TestCase): """ Tests the "requires variable filter" handler behavior """ + def setUp(self): """ Called before each test. Initiates a framework. @@ -61,22 +65,30 @@ class RequiresVarFilterTest(unittest.TestCase): assert isinstance(context, BundleContext) # Prepare random string values - random_static_1 = ''.join(random.choice(string.ascii_letters) - for _ in range(50)) - random_static_2 = ''.join(random.choice(string.ascii_letters) - for _ in range(50)) + random_static_1 = "".join( + random.choice(string.ascii_letters) for _ in range(50) + ) + random_static_2 = "".join( + random.choice(string.ascii_letters) for _ in range(50) + ) # Assert that the service is not yet available - self.assertIsNone(context.get_service_reference(IEchoService), - "Service is already registered") + self.assertIsNone( + context.get_service_reference(IEchoService), + "Service is already registered", + ) # Instantiate the components consumer_single = self.ipopo.instantiate( - module.FACTORY_REQUIRES_VAR_FILTER, NAME_A, - {"static": random_static_1}) + module.FACTORY_REQUIRES_VAR_FILTER, + NAME_A, + {"static": random_static_1}, + ) consumer_multi = self.ipopo.instantiate( - module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE, NAME_B, - {"static": random_static_1}) + module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE, + NAME_B, + {"static": random_static_1}, + ) consumers = (consumer_single, consumer_multi) # Force the "answer" property to an int @@ -85,46 +97,60 @@ class RequiresVarFilterTest(unittest.TestCase): # Component must be invalid for consumer in consumers: - self.assertListEqual([IPopoEvent.INSTANTIATED], consumer.states, - "Invalid component states: {0}" - .format(consumer.states)) + self.assertListEqual( + [IPopoEvent.INSTANTIATED], + consumer.states, + "Invalid component states: {0}".format(consumer.states), + ) consumer.reset() # Instantiate a service, matching the filter svc1 = object() context.register_service( - IEchoService, svc1, - {"s": random_static_1, "a": consumer_single.answer}) + IEchoService, + svc1, + {"s": random_static_1, "a": consumer_single.answer}, + ) # The consumer must have been validated for consumer in consumers: self.assertListEqual( - [IPopoEvent.BOUND, IPopoEvent.VALIDATED], consumer.states, - "Invalid component states: {0}".format(consumer.states)) + [IPopoEvent.BOUND, IPopoEvent.VALIDATED], + consumer.states, + "Invalid component states: {0}".format(consumer.states), + ) consumer.reset() self.assertIs(consumer_single.service, svc1, "Wrong service injected") - self.assertListEqual(consumer_multi.service, [svc1], - "Wrong service injected") + self.assertListEqual( + consumer_multi.service, [svc1], "Wrong service injected" + ) # New service, still matching svc2 = object() reg2 = context.register_service( - IEchoService, svc2, - {"s": random_static_1, "a": consumer_single.answer}) + IEchoService, + svc2, + {"s": random_static_1, "a": consumer_single.answer}, + ) # The single consumer must not have been modified - self.assertListEqual([], consumer_single.states, - "Invalid component states: {0}" - .format(consumer_single.states)) + self.assertListEqual( + [], + consumer_single.states, + "Invalid component states: {0}".format(consumer_single.states), + ) self.assertIs(consumer_single.service, svc1, "Wrong service injected") # The aggregate consumer must have been modified - self.assertListEqual([IPopoEvent.BOUND], consumer_multi.states, - "Invalid component states: {0}" - .format(consumer_multi.states)) - self.assertListEqual(consumer_multi.service, [svc1, svc2], - "Second service not injected") + self.assertListEqual( + [IPopoEvent.BOUND], + consumer_multi.states, + "Invalid component states: {0}".format(consumer_multi.states), + ) + self.assertListEqual( + consumer_multi.service, [svc1, svc2], "Second service not injected" + ) # Reset states for consumer in consumers: @@ -134,17 +160,22 @@ class RequiresVarFilterTest(unittest.TestCase): reg2.unregister() # The single consumer must not have been modified - self.assertListEqual([], consumer_single.states, - "Invalid component states: {0}" - .format(consumer_single.states)) + self.assertListEqual( + [], + consumer_single.states, + "Invalid component states: {0}".format(consumer_single.states), + ) self.assertIs(consumer_single.service, svc1, "Wrong service injected") # The aggregate consumer must have been modified - self.assertListEqual([IPopoEvent.UNBOUND], consumer_multi.states, - "Invalid component states: {0}" - .format(consumer_multi.states)) - self.assertListEqual(consumer_multi.service, [svc1], - "Second service not removed") + self.assertListEqual( + [IPopoEvent.UNBOUND], + consumer_multi.states, + "Invalid component states: {0}".format(consumer_multi.states), + ) + self.assertListEqual( + consumer_multi.service, [svc1], "Second service not removed" + ) # Change the filter property to the exact same value for consumer in consumers: @@ -152,14 +183,17 @@ class RequiresVarFilterTest(unittest.TestCase): consumer.change(42) # The consumer must not have been modified - self.assertListEqual([], consumer.states, - "Invalid component states: {0}" - .format(consumer.states)) + self.assertListEqual( + [], + consumer.states, + "Invalid component states: {0}".format(consumer.states), + ) consumer.reset() self.assertIs(consumer_single.service, svc1, "Wrong service injected") - self.assertListEqual(consumer_multi.service, [svc1], - "Wrong service injected") + self.assertListEqual( + consumer_multi.service, [svc1], "Wrong service injected" + ) # Change the filter property to a new value for consumer in consumers: @@ -170,60 +204,76 @@ class RequiresVarFilterTest(unittest.TestCase): self.assertListEqual( [IPopoEvent.INVALIDATED, IPopoEvent.UNBOUND], consumer.states, - "Invalid component states: {0}".format(consumer.states)) + "Invalid component states: {0}".format(consumer.states), + ) self.assertIs(consumer.service, None, "A service is injected") consumer.reset() # New service, matching part of the filter svc3 = object() context.register_service( - IEchoService, svc3, - {"s": random_static_2, "a": consumer_single.answer}) + IEchoService, + svc3, + {"s": random_static_2, "a": consumer_single.answer}, + ) # The consumer must not have been modified for consumer in consumers: - self.assertListEqual([], consumer.states, - "Invalid component states: {0}" - .format(consumer.states)) + self.assertListEqual( + [], + consumer.states, + "Invalid component states: {0}".format(consumer.states), + ) self.assertIs(consumer.service, None, "A service is injected") consumer.reset() # New service, matching the new filer svc4 = object() reg4 = context.register_service( - IEchoService, svc4, - {"s": random_static_1, "a": consumer_single.answer}) + IEchoService, + svc4, + {"s": random_static_1, "a": consumer_single.answer}, + ) # The consumer must not have been modified for consumer in consumers: self.assertListEqual( [IPopoEvent.BOUND, IPopoEvent.VALIDATED], consumer.states, - "Invalid component states: {0}".format(consumer.states)) + "Invalid component states: {0}".format(consumer.states), + ) consumer.reset() self.assertIs(consumer_single.service, svc4, "Wrong service injected") - self.assertListEqual(consumer_multi.service, [svc4], - "Wrong service injected") + self.assertListEqual( + consumer_multi.service, [svc4], "Wrong service injected" + ) # New service, matching the new filer svc5 = object() reg5 = context.register_service( - IEchoService, svc5, - {"s": random_static_1, "a": consumer_single.answer}) + IEchoService, + svc5, + {"s": random_static_1, "a": consumer_single.answer}, + ) # The single consumer must not have been modified - self.assertListEqual([], consumer_single.states, - "Invalid component states: {0}" - .format(consumer_single.states)) + self.assertListEqual( + [], + consumer_single.states, + "Invalid component states: {0}".format(consumer_single.states), + ) self.assertIs(consumer_single.service, svc4, "Wrong service injected") # The aggregate consumer must have been modified - self.assertListEqual([IPopoEvent.BOUND], consumer_multi.states, - "Invalid component states: {0}" - .format(consumer_multi.states)) - self.assertListEqual(consumer_multi.service, [svc4, svc5], - "Second service not injected") + self.assertListEqual( + [IPopoEvent.BOUND], + consumer_multi.states, + "Invalid component states: {0}".format(consumer_multi.states), + ) + self.assertListEqual( + consumer_multi.service, [svc4, svc5], "Second service not injected" + ) # Reset states for consumer in consumers: @@ -236,15 +286,19 @@ class RequiresVarFilterTest(unittest.TestCase): self.assertListEqual( rebind_states, consumer_single.states, - "Invalid component states: {0}".format(consumer_single.states)) + "Invalid component states: {0}".format(consumer_single.states), + ) self.assertIs(consumer_single.service, svc5, "Wrong service injected") # The aggregate consumer must have been modified - self.assertListEqual([IPopoEvent.UNBOUND], consumer_multi.states, - "Invalid component states: {0}" - .format(consumer_multi.states)) - self.assertListEqual(consumer_multi.service, [svc5], - "First service not removed") + self.assertListEqual( + [IPopoEvent.UNBOUND], + consumer_multi.states, + "Invalid component states: {0}".format(consumer_multi.states), + ) + self.assertListEqual( + consumer_multi.service, [svc5], "First service not removed" + ) # Reset states for consumer in consumers: @@ -255,8 +309,10 @@ class RequiresVarFilterTest(unittest.TestCase): for consumer in consumers: self.assertListEqual( - [IPopoEvent.INVALIDATED, IPopoEvent.UNBOUND], consumer.states, - "Invalid component states: {0}".format(consumer.states)) + [IPopoEvent.INVALIDATED, IPopoEvent.UNBOUND], + consumer.states, + "Invalid component states: {0}".format(consumer.states), + ) self.assertIs(consumer.service, None, "A service is still injected") consumer.reset() @@ -265,9 +321,15 @@ class RequiresVarFilterTest(unittest.TestCase): Tests the @RequiresVarFilter handler without immediate_rebind (default) """ module = install_bundle(self.framework) - self.__internal_test(module, - [IPopoEvent.INVALIDATED, IPopoEvent.UNBOUND, - IPopoEvent.BOUND, IPopoEvent.VALIDATED]) + self.__internal_test( + module, + [ + IPopoEvent.INVALIDATED, + IPopoEvent.UNBOUND, + IPopoEvent.BOUND, + IPopoEvent.VALIDATED, + ], + ) def test_immediate_rebind(self): """ @@ -276,8 +338,10 @@ class RequiresVarFilterTest(unittest.TestCase): # Modify component factories module = install_bundle(self.framework) - for clazz in (module.RequiresVarFilterComponentFactory, - module.RequiresVarFilterAggregateComponentFactory): + for clazz in ( + module.RequiresVarFilterComponentFactory, + module.RequiresVarFilterAggregateComponentFactory, + ): context = get_factory_context(clazz) configs = context.get_handler(RequiresVarFilter.HANDLER_ID) configs["service"].immediate_rebind = True @@ -292,20 +356,27 @@ class RequiresVarFilterTest(unittest.TestCase): context = self.framework.get_bundle_context() assert isinstance(context, BundleContext) - random_static = ''.join(random.choice(string.ascii_letters) - for _ in range(50)) + random_static = "".join( + random.choice(string.ascii_letters) for _ in range(50) + ) # Assert that the service is not yet available - self.assertIsNone(context.get_service_reference(IEchoService), - "Service is already registered") + self.assertIsNone( + context.get_service_reference(IEchoService), + "Service is already registered", + ) # Instantiate the components consumer_single = self.ipopo.instantiate( - module.FACTORY_REQUIRES_VAR_FILTER, NAME_A, - {"static": random_static}) + module.FACTORY_REQUIRES_VAR_FILTER, + NAME_A, + {"static": random_static}, + ) consumer_multi = self.ipopo.instantiate( - module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE, NAME_B, - {"static": random_static}) + module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE, + NAME_B, + {"static": random_static}, + ) consumers = (consumer_single, consumer_multi) # Force the "answer" property to an int @@ -315,15 +386,22 @@ class RequiresVarFilterTest(unittest.TestCase): # Instantiate a service, matching the filter svc1 = object() context.register_service( - IEchoService, svc1, - {"s": random_static, "a": consumer_single.answer}) + IEchoService, + svc1, + {"s": random_static, "a": consumer_single.answer}, + ) # Component must be valid for consumer in consumers: self.assertListEqual( - [IPopoEvent.INSTANTIATED, IPopoEvent.BOUND, - IPopoEvent.VALIDATED], consumer.states, - "Invalid component states: {0}".format(consumer.states)) + [ + IPopoEvent.INSTANTIATED, + IPopoEvent.BOUND, + IPopoEvent.VALIDATED, + ], + consumer.states, + "Invalid component states: {0}".format(consumer.states), + ) consumer.reset() # Set an invalid filter @@ -332,8 +410,10 @@ class RequiresVarFilterTest(unittest.TestCase): # The consumer must have been validated self.assertListEqual( - [IPopoEvent.INVALIDATED, IPopoEvent.UNBOUND], consumer.states, - "Invalid component states: {0}".format(consumer.states)) + [IPopoEvent.INVALIDATED, IPopoEvent.UNBOUND], + consumer.states, + "Invalid component states: {0}".format(consumer.states), + ) consumer.reset() self.assertIs(consumer.service, None, "A service is injected") @@ -346,12 +426,15 @@ class RequiresVarFilterTest(unittest.TestCase): # Instantiate a service, matching the filter svc = object() reg = context.register_service( - IEchoService, svc, {"s": random_static, "a": invalid}) + IEchoService, svc, {"s": random_static, "a": invalid} + ) # Nothing should happen self.assertListEqual( - [], consumer.states, - "Invalid component states: {0}".format(consumer.states)) + [], + consumer.states, + "Invalid component states: {0}".format(consumer.states), + ) consumer.reset() reg.unregister() @@ -365,20 +448,27 @@ class RequiresVarFilterTest(unittest.TestCase): context = self.framework.get_bundle_context() assert isinstance(context, BundleContext) - random_static = ''.join(random.choice(string.ascii_letters) - for _ in range(50)) + random_static = "".join( + random.choice(string.ascii_letters) for _ in range(50) + ) # Assert that the service is not yet available - self.assertIsNone(context.get_service_reference(IEchoService), - "Service is already registered") + self.assertIsNone( + context.get_service_reference(IEchoService), + "Service is already registered", + ) # Instantiate the components consumer_single = self.ipopo.instantiate( - module.FACTORY_REQUIRES_VAR_FILTER, NAME_A, - {"static": random_static}) + module.FACTORY_REQUIRES_VAR_FILTER, + NAME_A, + {"static": random_static}, + ) consumer_multi = self.ipopo.instantiate( - module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE, NAME_B, - {"static": random_static}) + module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE, + NAME_B, + {"static": random_static}, + ) consumers = (consumer_single, consumer_multi) # Force the "answer" property to an int @@ -388,15 +478,22 @@ class RequiresVarFilterTest(unittest.TestCase): # Instantiate a service, matching the filter svc1 = object() context.register_service( - IEchoService, svc1, - {"s": random_static, "a": consumer_single.answer}) + IEchoService, + svc1, + {"s": random_static, "a": consumer_single.answer}, + ) # Component must be valid for consumer in consumers: self.assertListEqual( - [IPopoEvent.INSTANTIATED, IPopoEvent.BOUND, - IPopoEvent.VALIDATED], consumer.states, - "Invalid component states: {0}".format(consumer.states)) + [ + IPopoEvent.INSTANTIATED, + IPopoEvent.BOUND, + IPopoEvent.VALIDATED, + ], + consumer.states, + "Invalid component states: {0}".format(consumer.states), + ) consumer.reset() # Set the filter with a similar value (same once formatted) @@ -406,13 +503,16 @@ class RequiresVarFilterTest(unittest.TestCase): # The consumer should not be notified for consumer in consumers: self.assertListEqual( - [], consumer.states, - "Invalid component states: {0}".format(consumer.states)) + [], + consumer.states, + "Invalid component states: {0}".format(consumer.states), + ) consumer.reset() self.assertIs(consumer_single.service, svc1, "Wrong service injected") - self.assertListEqual(consumer_multi.service, [svc1], - "Wrong service injected") + self.assertListEqual( + consumer_multi.service, [svc1], "Wrong service injected" + ) def test_incomplete_properties(self): """ @@ -423,21 +523,26 @@ class RequiresVarFilterTest(unittest.TestCase): assert isinstance(context, BundleContext) answer = 42 - random_static = ''.join(random.choice(string.ascii_letters) - for _ in range(50)) + random_static = "".join( + random.choice(string.ascii_letters) for _ in range(50) + ) # Assert that the service is not yet available - self.assertIsNone(context.get_service_reference(IEchoService), - "Service is already registered") + self.assertIsNone( + context.get_service_reference(IEchoService), + "Service is already registered", + ) # Instantiate a service, matching the filter svc1 = object() context.register_service( - IEchoService, svc1, {"s": random_static, "a": answer}) + IEchoService, svc1, {"s": random_static, "a": answer} + ) for name, factory in ( - (NAME_A, module.FACTORY_REQUIRES_VAR_FILTER), - (NAME_B, module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE)): + (NAME_A, module.FACTORY_REQUIRES_VAR_FILTER), + (NAME_B, module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE), + ): # Instantiate the component, without the static property consumer = self.ipopo.instantiate(factory, name, {}) @@ -446,14 +551,59 @@ class RequiresVarFilterTest(unittest.TestCase): # Component must be instantiated, but not valid self.assertListEqual( - [IPopoEvent.INSTANTIATED], consumer.states, - "Invalid component states: {0}".format(consumer.states)) + [IPopoEvent.INSTANTIATED], + consumer.states, + "Invalid component states: {0}".format(consumer.states), + ) self.assertIs(consumer.service, None, "Service injected") + def test_late_binding(self): + """ + Tests late binding, see issue #119: + https://github.com/tcalmant/ipopo/issues/119 + """ + install_bundle(self.framework, "tests.ipopo.issue_119_bundle") + context = self.framework.get_bundle_context() + assert isinstance(context, BundleContext) + + self.ipopo.instantiate("varservice-factory", "varservice-instance") + self.ipopo.instantiate("provider-factory", "provider-instance-1", {"prop": "svc1"}) + self.ipopo.instantiate("provider-factory", "provider-instance-2", {"prop": "svc2"}) + + svc1 = self.ipopo.get_instance("provider-instance-1") + svc2 = self.ipopo.get_instance("provider-instance-2") + consumer = self.ipopo.get_instance("varservice-instance") + + self.assertEqual(self.ipopo.get_instance_details("provider-instance-1")["state"], StoredInstance.VALID) + self.assertEqual(self.ipopo.get_instance_details("provider-instance-2")["state"], StoredInstance.VALID) + self.assertEqual(self.ipopo.get_instance_details("varservice-instance")["state"], StoredInstance.INVALID) + + consumer.search = "svc1" + self.assertEqual(self.ipopo.get_instance_details("varservice-instance")["state"], StoredInstance.VALID) + self.assertEqual(consumer.depends, svc1) + + consumer.search = "svc2" + self.assertEqual(self.ipopo.get_instance_details("varservice-instance")["state"], StoredInstance.VALID) + self.assertEqual(consumer.depends, svc2) + + consumer.search = "non-existent" + self.assertEqual(self.ipopo.get_instance_details("varservice-instance")["state"], StoredInstance.INVALID) + self.assertIsNone(consumer.depends) + + consumer.search = "svc1" + self.assertEqual(self.ipopo.get_instance_details("varservice-instance")["state"], StoredInstance.VALID) + self.assertEqual(consumer.depends, svc1) + + consumer.search = None + self.assertEqual(self.ipopo.get_instance_details("varservice-instance")["state"], StoredInstance.INVALID) + self.assertIsNone(consumer.depends) + + # ------------------------------------------------------------------------------ if __name__ == "__main__": # Set logging level import logging + logging.basicConfig(level=logging.DEBUG) unittest.main()
RequiresVarFilter doesn't update bindings. When updating a property used in the filter of a RequiresVarFilter decorator on an invalid component instance, the bindings and the component lifecycle are not updated. Current workaround: update `pelix.ipopo.handlers.requiresvarfilter::_VariableFilterMixIn::_reset` to call `self._ipopo_instance.update_bindings()` after `self.start()`, and `self._ipopo_instance.check_lifecycle()` after the bindings for-loop.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/ipopo/test_requires_varfilter.py::RequiresVarFilterTest::test_late_binding" ]
[ "tests/ipopo/test_requires_varfilter.py::RequiresVarFilterTest::test_immediate_rebind", "tests/ipopo/test_requires_varfilter.py::RequiresVarFilterTest::test_incomplete_properties", "tests/ipopo/test_requires_varfilter.py::RequiresVarFilterTest::test_invalid_filter", "tests/ipopo/test_requires_varfilter.py::RequiresVarFilterTest::test_no_change", "tests/ipopo/test_requires_varfilter.py::RequiresVarFilterTest::test_requires_var_filter" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2023-10-28T18:53:24Z"
apache-2.0
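For illustration, the late-binding pattern exercised by the new `test_late_binding` test above looks roughly like the following standalone sketch. The bundle, factory, and instance names come verbatim from the test; the framework bootstrapping (`create_framework`, `use_ipopo`) follows the usual Pelix idiom and is an assumption of this snippet, not part of the patch.

```python
# Sketch only: bundle/factory names come from the test above; the
# bootstrapping calls are the standard Pelix ones and are assumed here.
from pelix.framework import create_framework
from pelix.ipopo.constants import use_ipopo

framework = create_framework(("pelix.ipopo.core", "tests.ipopo.issue_119_bundle"))
framework.start()

with use_ipopo(framework.get_bundle_context()) as ipopo:
    ipopo.instantiate("varservice-factory", "varservice-instance")
    ipopo.instantiate("provider-factory", "provider-instance-1", {"prop": "svc1"})
    consumer = ipopo.get_instance("varservice-instance")
    consumer.search = "svc1"  # with the fix, this property update re-binds
```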
tech-teach__marshmallowjson-3
diff --git a/.travis.yml b/.travis.yml index 4f0901a..71234d5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,26 +1,29 @@ -# Config file for automatic testing at travis-ci.org -# This file will be regenerated if you run travis_pypi_setup.py - -language: python -python: - - 3.6 - - 3.5 - -# command to install dependencies, e.g. pip install -r requirements.txt --use-mirrors -install: pip install -U tox-travis - -# command to run tests, e.g. python setup.py test -script: tox - -# After you create the Github repo and add it to Travis, run the -# travis_pypi_setup.py script to finish PyPI deployment setup +# This file was autogenerated and will overwrite each time you run travis_pypi_setup.py deploy: + password: + secure: !!binary | + VnZ0MytIcHhSRkxWdGFqYnd6WGo5eWZxN0E0UE9kRm5QNTFZR3FQcXQ3MFEvakp2OXhkMnZwbVcr + MElOVVNjZFVqdUljUUpCa2NrbXdPZUVzL1FuNThBRVhJRjhCMVNSL0FGTFV1bW9DTEU0YmgzcHgw + d2VSdk8rZXpkUFgxdmx0TFF4bWdLR2xHVG4yN3RlSEtTdVR6eVVWTkNzSGwrKzB5a0VSeFBPODBC + NDl4S0EvbjVPQk9YSkFYZzNXODUvaDRwUTQ0Z2NhSHg3bTdZSTBGSytRZGJTZHRWTjZrUEV1R3hJ + MlNSdkhQdVdmWjhhY0Q5eXJSVmtVRk5iUldzNTZUeEI3TUp0ajkxdEJTdGZLdTM0Z2ZITGNXdTNp + M1dQUVl3UmlZUFNFUjZvMnVZZzFsR1k3ZmJhM01ZZUVGdnRYZER3YndUcEh6T1kyYnlSd1ptSlhr + N3VCOUw1dlNLa1hyd0VOcXgyaU12Wm5jMVhNbkRqcTNnOHYvUk5XSWVoSnFoMWN4ZGtkNHhPREty + enpJMUNZbGc5b0FaL1JSYVVvR3ZuNkRtYVN5aTU2U2NZZTJWaUlld1E3Zm13eEpKQVBmRzBMY2RO + QUkrU0tCUmVqenl6bHlBSndwS20wRU1kOUx4dlRoQTVydzlwS0pVSkYzN20xWHJGUU9OK29nOUFU + YzZKK3puSUtQRmVXTWlHUU5xL1RRZUI5YTZPcnRZQmxZWjY2ZldhelppOEVXL01PSUlXQnNDU0Zm + VG9VSXNDSDAxTFNKczc0MzRjdVJORWZON1FhOVVDcnh0MGVNcnNDTVRGMWxyV28vbW9jODU0TXlZ + bmV2UlFtOHVxa0k4Q2JaTStvM0pDV2lCQXV6MStVZjdaR1R2OThlcFVvYkN1WGhZY00xTU1nd1E9 provider: pypi distributions: sdist bdist_wheel user: odarbelaeze - password: - secure: PLEASE_REPLACE_ME - on: - tags: true - repo: odarbelaeze/marshmallowjson + true: python: 3.6 + repo: odarbelaeze/marshmallowjson + tags: true +install: pip install -U tox-travis +language: python +python: +- 3.6 +- 3.5 +script: tox diff --git a/marshmallowjson/cli.py b/marshmallowjson/cli.py index 8cdfbe7..b19d723 100644 --- a/marshmallowjson/cli.py +++ b/marshmallowjson/cli.py @@ -1,14 +1,41 @@ """Console script for marshmallowjson.""" import click +import collections +import json +import sys + + +def fail(kind, type_, name): + click.echo(click.style( + '{kind} is not a known type in {type_}.{name}'.format( + kind=kind, + type_=type_, + name=name, + ), + fg='red' + )) + sys.exit(1) @click.command() -def main(args=None): - """Console script for marshmallowjson.""" - click.echo("Replace this message by putting your code into " - "marshmallowjson.cli.main") - click.echo("See click documentation at http://click.pocoo.org/") [email protected]('definition', type=click.File('r')) +def main(definition): + """Validate an schema for marshmallow json""" + known = set('string boolean uuid number integer decimal'.split()) + definitions = json.load(definition, object_pairs_hook=collections.OrderedDict) + for type_, schema in definitions.items(): + for name, field in schema.items(): + kind = field['kind'] + if kind == 'list': + items = field['items'] + if items not in known: + fail(items, type_, name) + continue + if kind not in known: + fail(kind, type_, name) + known.add(type_) + click.echo(click.style('All clear', fg='green')) if __name__ == "__main__":
tech-teach/marshmallowjson
b8a2e3edf36dc7c65b73ed108371e1b2743a4b8e
diff --git a/tests/data/basic.json b/tests/data/basic.json new file mode 100644 index 0000000..93aaa83 --- /dev/null +++ b/tests/data/basic.json @@ -0,0 +1,20 @@ +{ + "StringType": { + "field": { + "kind": "string", + "required": false + } + }, + "NumberType": { + "field": { + "kind": "number", + "required": false + } + }, + "BooleanType": { + "field": { + "kind": "boolean", + "required": false + } + } +} diff --git a/tests/data/list.json b/tests/data/list.json new file mode 100644 index 0000000..a08f382 --- /dev/null +++ b/tests/data/list.json @@ -0,0 +1,15 @@ +{ + "StringType": { + "field": { + "kind": "string", + "required": false + } + }, + "ListOfString": { + "field": { + "kind": "list", + "items": "StringType", + "required": false + } + } +} diff --git a/tests/data/unknown.json b/tests/data/unknown.json new file mode 100644 index 0000000..7bd8f98 --- /dev/null +++ b/tests/data/unknown.json @@ -0,0 +1,9 @@ +{ + "Type": { + "field": { + "kind": "Unknown", + "required": false, + "doc": "Unknow is nowhere near the type definitions, that's an error" + } + } +} diff --git a/tests/test_marshmallowjson.py b/tests/test_marshmallowjson.py index 8a3236c..62824c1 100644 --- a/tests/test_marshmallowjson.py +++ b/tests/test_marshmallowjson.py @@ -1,5 +1,6 @@ """Tests for `marshmallowjson` package.""" +import os import pytest from click.testing import CliRunner @@ -9,27 +10,45 @@ from marshmallowjson import cli @pytest.fixture -def response(): - """Sample pytest fixture. +def unknown(): + root = os.path.dirname(__file__) + return os.path.join(root, 'data/unknown.json') - See more at: http://doc.pytest.org/en/latest/fixture.html - """ - # import requests - # return requests.get('https://github.com/audreyr/cookiecutter-pypackage') + [email protected] +def basic(): + root = os.path.dirname(__file__) + return os.path.join(root, 'data/basic.json') + + [email protected] +def list_schema(): + root = os.path.dirname(__file__) + return os.path.join(root, 'data/list.json') + + +def test_error_when_using_unknown_type(unknown): + runner = CliRunner() + result = runner.invoke(cli.main, [unknown]) + assert result.exit_code == 1, result.output + assert 'Unknown is not a known type in Type.field' in result.output + + +def test_all_basic_types_are_allowed(basic): + runner = CliRunner() + result = runner.invoke(cli.main, [basic]) + assert result.exit_code == 0, result.output -def test_content(response): - """Sample pytest test function with the pytest fixture as an argument.""" - # from bs4 import BeautifulSoup - # assert 'GitHub' in BeautifulSoup(response.content).title.string +def test_lists_are_allowed(list_schema): + runner = CliRunner() + result = runner.invoke(cli.main, [list_schema]) + assert result.exit_code == 0, result.output def test_command_line_interface(): """Test the CLI.""" runner = CliRunner() - result = runner.invoke(cli.main) - assert result.exit_code == 0 - assert 'marshmallowjson.cli.main' in result.output help_result = runner.invoke(cli.main, ['--help']) assert help_result.exit_code == 0 assert '--help Show this message and exit.' in help_result.output
validate json schema dependency order It is necessary to work with a valid json schema; to get this, create a json validator for this structure: `{ "Identifier": { "catalog": { "kind": "str", "required": true, "doc": "Que vaina es esto" }, "entry": { "kind": "str", "required": true }, "uuid": { "kind": "uuid", "required": true } }, "General": { "identifier": { "kind": "object", "schema": "Identifier", "required": true }, "title": { "kind": "str", "required": true }, "keywords": { "kind": "list", "items": "str", "required": false } }, "LearningObject": { "general": { "kind": "object", "schema": "General", "required": false } } }`
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_marshmallowjson.py::test_error_when_using_unknown_type", "tests/test_marshmallowjson.py::test_all_basic_types_are_allowed", "tests/test_marshmallowjson.py::test_lists_are_allowed" ]
[ "tests/test_marshmallowjson.py::test_command_line_interface", "tests/test_marshmallowjson.py::test_avoid_warning" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2018-01-28T23:48:38Z"
mit
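A self-contained sketch of the dependency-order check implemented in the patch above: every `kind` is validated against the set of known types, and each definition that passes becomes a known type for the definitions after it. The function name `validate` and the string-input variant are choices made here for illustration; the merged CLI reads from a file instead.

```python
import collections
import json

BASE_TYPES = set('string boolean uuid number integer decimal'.split())

def validate(text):
    known = set(BASE_TYPES)
    # OrderedDict keeps the definition order, which is what makes
    # "dependency order" checkable at all.
    definitions = json.loads(text, object_pairs_hook=collections.OrderedDict)
    for type_, schema in definitions.items():
        for name, field in schema.items():
            # Lists are validated through their item type instead.
            kind = field['items'] if field['kind'] == 'list' else field['kind']
            if kind not in known:
                raise ValueError('{0} is not a known type in {1}.{2}'.format(
                    kind, type_, name))
        known.add(type_)
```

Note that the schema quoted in the problem statement uses kinds such as `str` and `object` that the merged patch does not yet know about; only `list` gets special handling.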
tefra__pytuber-20
diff --git a/pytuber/cli.py b/pytuber/cli.py index f432978..2c7d6e6 100644 --- a/pytuber/cli.py +++ b/pytuber/cli.py @@ -68,6 +68,8 @@ def add(): """Add playlist.""" +add.add_command(core.add_from_editor) +add.add_command(core.add_from_file) add.add_command(lastfm.add) diff --git a/pytuber/core/commands/__init__.py b/pytuber/core/commands/__init__.py index 27fbf7e..888f16e 100644 --- a/pytuber/core/commands/__init__.py +++ b/pytuber/core/commands/__init__.py @@ -7,6 +7,7 @@ from pytuber.core.commands.cmd_show import show from pytuber.core.commands.cmd_autocomplete import autocomplete from pytuber.core.commands.cmd_clean import clean from pytuber.core.commands.cmd_quota import quota +from pytuber.core.commands.cmd_add import add_from_editor, add_from_file __all__ = [ "setup", @@ -18,4 +19,6 @@ __all__ = [ "autocomplete", "clean", "quota", + "add_from_editor", + "add_from_file", ] diff --git a/pytuber/core/commands/cmd_add.py b/pytuber/core/commands/cmd_add.py new file mode 100644 index 0000000..e0af66b --- /dev/null +++ b/pytuber/core/commands/cmd_add.py @@ -0,0 +1,97 @@ +from typing import List + +import click +from tabulate import tabulate + +from pytuber.core.models import ( + PlaylistManager, + PlaylistType, + Provider, + TrackManager, +) +from pytuber.lastfm.commands.cmd_add import option_title +from pytuber.utils import magenta + + [email protected]("editor") +@option_title() +def add_from_editor(title: str) -> None: + """Create playlist in a text editor.""" + marker = ( + "\n\n# Copy/Paste your track list and hit save!\n" + "# One line per track, make sure it doesn't start with a #\n" + "# Separate the track artist and title with a single dash `-`\n" + ) + message = click.edit(marker) + create_playlist(title, parse_tracklist(message or "")) + + [email protected]("file") [email protected]("file", type=click.Path(), required=True) +@option_title() +def add_from_file(file: str, title: str) -> None: + """Import a playlist from a text file.""" + + with open(file, "r") as fp: + text = fp.read() + + create_playlist(title, parse_tracklist(text or "")) + + +def parse_tracklist(text): + tracks: List[tuple] = [] + for line in text.split("\n"): + line = line.strip() + if not line or line.startswith("#"): + continue + + parts = line.split("-", 1) + if len(parts) != 2: + continue + + artist, track = list(map(str.strip, parts)) + if not artist or not track or (artist, track) in tracks: + continue + + tracks.append((artist, track)) + + return tracks + + +def create_playlist(title, tracks): + if not tracks: + return click.secho("Tracklist is empty, aborting...") + + click.clear() + click.secho( + "{}\n\n{}\n".format( + tabulate( # type: ignore + [ + (magenta("Title:"), title), + (magenta("Tracks:"), len(tracks)), + ], + tablefmt="plain", + colalign=("right", "left"), + ), + tabulate( # type: ignore + [ + (i + 1, track[0], track[1]) + for i, track in enumerate(tracks) + ], + headers=("No", "Artist", "Track Name"), + ), + ) + ) + click.confirm("Are you sure you want to save this playlist?", abort=True) + playlist = PlaylistManager.set( + dict( + type=PlaylistType.EDITOR, + provider=Provider.user, + title=title.strip(), + tracks=[ + TrackManager.set(dict(artist=artist, name=name)).id + for artist, name in tracks + ], + ) + ) + click.secho("Added playlist: {}!".format(playlist.id)) diff --git a/pytuber/core/commands/s b/pytuber/core/commands/s new file mode 100644 index 0000000..e69de29 diff --git a/pytuber/core/models.py b/pytuber/core/models.py index 861373d..53f12c0 100644 --- a/pytuber/core/models.py 
+++ b/pytuber/core/models.py @@ -16,6 +16,14 @@ from pytuber.utils import timestamp class Provider(enum.Enum): lastfm = "last.fm" youtube = "youtube" + user = "user" + + def __str__(self): + return self.value + + +class PlaylistType(enum.Enum): + EDITOR = "editor" def __str__(self): return self.value diff --git a/pytuber/lastfm/commands/cmd_add.py b/pytuber/lastfm/commands/cmd_add.py index 1fd87a3..1f451b1 100644 --- a/pytuber/lastfm/commands/cmd_add.py +++ b/pytuber/lastfm/commands/cmd_add.py @@ -16,7 +16,7 @@ from .cmd_fetch import fetch_tracks @click.group("lastfm") def add(): - """Last.fm is a music service that learns what you love.""" + """Create playlists from Last.fm api.""" option_limit = partial(
tefra/pytuber
ae19a31c38462821ec22cd7376914ddce6a15a4f
diff --git a/tests/core/commands/test_cmd_add.py b/tests/core/commands/test_cmd_add.py new file mode 100644 index 0000000..c1fdd90 --- /dev/null +++ b/tests/core/commands/test_cmd_add.py @@ -0,0 +1,104 @@ +from unittest import mock + +from pytuber import cli +from pytuber.core.commands.cmd_add import create_playlist, parse_tracklist +from pytuber.core.models import PlaylistManager, PlaylistType, Provider +from tests.utils import CommandTestCase, PlaylistFixture + + +class CommandAddTests(CommandTestCase): + @mock.patch("click.edit") + @mock.patch("pytuber.core.commands.cmd_add.create_playlist") + @mock.patch("pytuber.core.commands.cmd_add.parse_tracklist") + def test_add_from_editor(self, parse_tracklist, create_playlist, clk_edit): + clk_edit.return_value = "foo" + parse_tracklist.return_value = ["a", "b"] + self.runner.invoke( + cli, ["add", "editor", "--title", "My Cool Playlist"] + ) + parse_tracklist.assert_called_once_with("foo") + create_playlist.assert_called_once_with("My Cool Playlist", ["a", "b"]) + + @mock.patch("pytuber.core.commands.cmd_add.create_playlist") + @mock.patch("pytuber.core.commands.cmd_add.parse_tracklist") + def test_add_from_file(self, parse_tracklist, create_playlist): + parse_tracklist.return_value = ["a", "b"] + with self.runner.isolated_filesystem(): + with open("hello.txt", "w") as f: + f.write("foo") + + self.runner.invoke( + cli, + ["add", "file", "hello.txt", "--title", "My Cool Playlist"], + ) + + parse_tracklist.assert_called_once_with("foo") + create_playlist.assert_called_once_with( + "My Cool Playlist", ["a", "b"] + ) + + +class CommandAddUtilsTests(CommandTestCase): + def test_parse_tracklist(self): + text = "\n".join( + ( + "Queen - Bohemian Rhapsody", + " Queen - Bohemian Rhapsody", + "Queen -I want to break free", + "#" " ", + "Wrong Format", + ) + ) + actual = parse_tracklist(text) + expected = [ + ("Queen", "Bohemian Rhapsody"), + ("Queen", "I want to break free"), + ] + self.assertEqual(expected, actual) + + @mock.patch("pytuber.core.commands.cmd_add.magenta") + @mock.patch.object(PlaylistManager, "set") + @mock.patch("click.confirm") + @mock.patch("click.secho") + @mock.patch("click.clear") + def test_create_playlist(self, clear, secho, confirm, set, magenta): + magenta.side_effect = lambda x: x + set.return_value = PlaylistFixture.one() + tracks = [ + ("Queen", "Bohemian Rhapsody"), + ("Queen", "I want to break free"), + ] + create_playlist("My Cool Playlist", tracks) + + expected_ouput = ( + "Title: My Cool Playlist", + "Tracks: 2", + "", + " No Artist Track Name", + "---- -------- --------------------", + " 1 Queen Bohemian Rhapsody", + " 2 Queen I want to break free", + ) + + self.assertOutput(expected_ouput, secho.call_args_list[0][0][0]) + self.assertEqual( + "Added playlist: id_a!", secho.call_args_list[1][0][0] + ) + + clear.assert_called_once_with() + confirm.assert_called_once_with( + "Are you sure you want to save this playlist?", abort=True + ) + set.assert_called_once_with( + dict( + type=PlaylistType.EDITOR, + provider=Provider.user, + title="My Cool Playlist", + tracks=["55a4d2b", "b045fee"], + ) + ) + + @mock.patch("click.secho") + def test_create_playlist_empty_tracks(self, secho): + create_playlist("foo", []) + secho.assert_called_once_with("Tracklist is empty, aborting...")
Support raw string format A file containing tracks, one per line, and a direct copy/paste in the terminal
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/core/commands/test_cmd_add.py::CommandAddTests::test_add_from_editor", "tests/core/commands/test_cmd_add.py::CommandAddTests::test_add_from_file", "tests/core/commands/test_cmd_add.py::CommandAddUtilsTests::test_create_playlist", "tests/core/commands/test_cmd_add.py::CommandAddUtilsTests::test_create_playlist_empty_tracks", "tests/core/commands/test_cmd_add.py::CommandAddUtilsTests::test_parse_tracklist" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2019-02-10T17:39:49Z"
mit
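The parsing rules from the patch above, mirrored here so the example is self-contained: blank lines and `#` comments are skipped, lines without a dash are ignored, and duplicates are dropped.

```python
def parse_tracklist(text):
    # Mirrors the behaviour of pytuber's parse_tracklist in the patch above.
    tracks = []
    for line in text.split("\n"):
        line = line.strip()
        if not line or line.startswith("#"):
            continue  # skip blanks and comment lines
        parts = line.split("-", 1)
        if len(parts) != 2:
            continue  # skip lines without an artist/title separator
        artist, track = [part.strip() for part in parts]
        if not artist or not track or (artist, track) in tracks:
            continue  # skip empty halves and duplicates
        tracks.append((artist, track))
    return tracks

text = "\n".join((
    "Queen - Bohemian Rhapsody",
    "  Queen - Bohemian Rhapsody",  # duplicate
    "# a comment",
    "Wrong Format",
))
assert parse_tracklist(text) == [("Queen", "Bohemian Rhapsody")]
```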
tefra__xsdata-273
diff --git a/xsdata/formats/converter.py b/xsdata/formats/converter.py index b5897fe2..e222c88d 100644 --- a/xsdata/formats/converter.py +++ b/xsdata/formats/converter.py @@ -230,12 +230,12 @@ class QNameConverter(Converter): self, value: QName, ns_map: Optional[Dict] = None, **kwargs: Any ) -> str: """ - Convert a QName instance to string either with a namespace prefix if - context namespaces are provided or as fully qualified with the - namespace uri. + Convert a QName instance to string either with a namespace prefix if a + prefix-URI namespaces mapping is provided or to a fully qualified name + with the namespace. examples: - - QName("http://www.w3.org/2001/XMLSchema", "int") & namespaces -> xs:int + - QName("http://www.w3.org/2001/XMLSchema", "int") & ns_map -> xs:int - QName("foo, "bar") -> {foo}bar """ @@ -294,12 +294,12 @@ class LxmlQNameConverter(Converter): self, value: etree.QName, ns_map: Optional[Dict] = None, **kwargs: Any ) -> str: """ - Convert a QName instance to string either with a namespace prefix if - context namespaces are provided or as fully qualified with the - namespace uri. + Convert a QName instance to string either with a namespace prefix if a + prefix-URI namespaces mapping is provided or to a fully qualified name + with the namespace. examples: - - QName("http://www.w3.org/2001/XMLSchema", "int") & namespaces -> xs:int + - QName("http://www.w3.org/2001/XMLSchema", "int") & ns_map -> xs:int - QName("foo, "bar") -> {foo}bar """ @@ -319,17 +319,33 @@ class EnumConverter(Converter): # Convert string value to the type of the first enum member first, otherwise # more complex types like QName, Decimals will fail. - enum_member: Enum = list(data_type)[0] - real_value = converter.from_string(value, [type(enum_member.value)], **kwargs) + member: Enum = list(data_type)[0] + value_type = type(member.value) + + # Suppress warnings + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + real_value = converter.from_string(value, [value_type], **kwargs) + + # Raise exception if the real value doesn't match the expected type. + if not isinstance(real_value, value_type): + raise ConverterError() + + # Attempt #1 use the enum constructor + with contextlib.suppress(ValueError): + return data_type(real_value) try: - try: - return data_type(real_value) - except ValueError: - # enums may be derived from xs:NMTOKENS or xs:list - # try again after removing excess whitespace. + # Attempt #2 the enum might be derived from + # xs:NMTOKENS or xs:list removing excess whitespace. + if isinstance(real_value, str): return data_type(" ".join(value.split())) - except ValueError: + + # Attempt #3 some values are never equal try to match + # canonical representations. + repr_value = repr(real_value) + return next(x for x in data_type if repr(x.value) == repr_value) + except (ValueError, StopIteration): raise ConverterError() def to_string(self, value: Enum, **kwargs: Any) -> str:
tefra/xsdata
12fc270c6a63dfe21222f30bb65a5ca317a86ba4
diff --git a/tests/formats/test_converter.py b/tests/formats/test_converter.py index 11661a63..cfc8fc11 100644 --- a/tests/formats/test_converter.py +++ b/tests/formats/test_converter.py @@ -242,10 +242,7 @@ class EnumConverterTests(TestCase): with warnings.catch_warnings(record=True) as w: convert("a", data_type=Fixture) - self.assertEqual( - "Failed to convert value `a` to one of [<class 'float'>]", - str(w[-1].message), - ) + self.assertEqual(0, len(w)) self.assertEqual(Fixture.two_point_one, convert("2.1", data_type=Fixture)) @@ -256,6 +253,16 @@ class EnumConverterTests(TestCase): convert = self.converter.from_string self.assertEqual(Fixture.a, convert(" a \na a ", data_type=Fixture)) + def test_from_string_with_value_never_equal_to_anything(self): + class Fixture(Enum): + a = Decimal("NaN") + + convert = self.converter.from_string + self.assertEqual(Fixture.a, convert("NaN", data_type=Fixture)) + + with self.assertRaises(ConverterError): + convert("1.0", data_type=Fixture) + def test_from_string_raises_exception_on_missing_data_type(self): with self.assertRaises(ConverterError) as cm: self.converter.from_string("a")
Enum converter Decimal('NaN') != Decimal('NaN') This interesting behavior is failing the Enum converter ```python In [1]: from enum import Enum In [2]: from decimal import Decimal In [3]: class Value(Enum): ...: VALUE_9_99 = Decimal('9.99') ...: NAN = Decimal('NaN') ...: In [6]: Value(Decimal('9.99')) Out[6]: <Value.VALUE_9_99: Decimal('9.99')> In [7]: Value(Decimal('NaN')) ValueError: Decimal('NaN') is not a valid Value ``` ```python In [8]: Decimal('NaN') == Decimal('NaN') Out[8]: False ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/formats/test_converter.py::EnumConverterTests::test_from_string", "tests/formats/test_converter.py::EnumConverterTests::test_from_string_with_value_never_equal_to_anything" ]
[ "tests/formats/test_converter.py::ConverterAdapterTests::test_from_string", "tests/formats/test_converter.py::ConverterAdapterTests::test_register_converter", "tests/formats/test_converter.py::ConverterAdapterTests::test_register_converter_with_lambda", "tests/formats/test_converter.py::ConverterAdapterTests::test_to_string", "tests/formats/test_converter.py::StrConverterTests::test_from_string", "tests/formats/test_converter.py::StrConverterTests::test_to_string", "tests/formats/test_converter.py::BoolConverterTests::test_from_string", "tests/formats/test_converter.py::BoolConverterTests::test_to_string", "tests/formats/test_converter.py::IntConverterTests::test_from_string", "tests/formats/test_converter.py::IntConverterTests::test_to_string", "tests/formats/test_converter.py::FloatConverterTests::test_from_string", "tests/formats/test_converter.py::FloatConverterTests::test_to_string", "tests/formats/test_converter.py::DecimalConverterTests::test_from_string", "tests/formats/test_converter.py::DecimalConverterTests::test_to_string", "tests/formats/test_converter.py::LxmlQNameConverterTests::test_from_string", "tests/formats/test_converter.py::LxmlQNameConverterTests::test_to_string", "tests/formats/test_converter.py::QNameConverterTests::test_from_string", "tests/formats/test_converter.py::QNameConverterTests::test_to_string", "tests/formats/test_converter.py::EnumConverterTests::test_from_string_raises_exception_on_missing_data_type", "tests/formats/test_converter.py::EnumConverterTests::test_from_string_with_list_derived_enum", "tests/formats/test_converter.py::EnumConverterTests::test_to_string", "tests/formats/test_converter.py::ProxyConverterTests::test_from_string", "tests/formats/test_converter.py::ProxyConverterTests::test_to_string" ]
{ "failed_lite_validators": [ "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2020-09-29T15:58:09Z"
mit
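A runnable illustration of both the failure mode in the problem statement above and the repr-matching fallback the patch adds:

```python
from decimal import Decimal
from enum import Enum

class Value(Enum):
    NAN = Decimal('NaN')

# NaN never compares equal to anything, itself included, so the normal
# Enum lookup by value fails:
try:
    Value(Decimal('NaN'))
except ValueError:
    pass  # "Decimal('NaN') is not a valid Value"

# Attempt #3 from the patch: match canonical representations instead.
repr_value = repr(Decimal('NaN'))
member = next(x for x in Value if repr(x.value) == repr_value)
assert member is Value.NAN
```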
tefra__xsdata-358
diff --git a/docs/xml.rst b/docs/xml.rst index 47ba1e30..61e62dcd 100644 --- a/docs/xml.rst +++ b/docs/xml.rst @@ -167,6 +167,9 @@ context instance between them to save on memory and processing. * - xml_version - str - XML Version number, default: ``1.0`` + * - xml_declaration + - bool + - Generate XML declaration, default ``True`` * - pretty_print - bool - Enable pretty output, default ``False`` diff --git a/xsdata/formats/dataclass/serializers/config.py b/xsdata/formats/dataclass/serializers/config.py index e7369247..4a408c4a 100644 --- a/xsdata/formats/dataclass/serializers/config.py +++ b/xsdata/formats/dataclass/serializers/config.py @@ -8,6 +8,7 @@ class SerializerConfig: """ :param encoding: Text encoding :param xml_version: XML Version number (1.0|1.1) + :param xml_declaration: Generate XML declaration :param pretty_print: Enable pretty output :param schema_location: Specify the xsi:schemaLocation attribute value :param no_namespace_schema_location: Specify the xsi:noNamespaceSchemaLocation @@ -16,6 +17,7 @@ class SerializerConfig: encoding: str = field(default="UTF-8") xml_version: str = field(default="1.0") + xml_declaration: bool = field(default=True) pretty_print: bool = field(default=False) schema_location: Optional[str] = field(default=None) no_namespace_schema_location: Optional[str] = field(default=None) diff --git a/xsdata/formats/dataclass/serializers/mixins.py b/xsdata/formats/dataclass/serializers/mixins.py index 76b8a597..c4b44989 100644 --- a/xsdata/formats/dataclass/serializers/mixins.py +++ b/xsdata/formats/dataclass/serializers/mixins.py @@ -91,8 +91,9 @@ class XmlWriter: self.handler.endDocument() def start_document(self): - self.output.write(f'<?xml version="{self.config.xml_version}"') - self.output.write(f' encoding="{self.config.encoding}"?>\n') + if self.config.xml_declaration: + self.output.write(f'<?xml version="{self.config.xml_version}"') + self.output.write(f' encoding="{self.config.encoding}"?>\n') def start_tag(self, qname: str): """
tefra/xsdata
a2c51f5bcdcaf2be620a43c9f80f831da16cefc8
diff --git a/tests/formats/dataclass/serializers/writers/test_lxml.py b/tests/formats/dataclass/serializers/writers/test_lxml.py index d8cb5ff9..2d92f070 100644 --- a/tests/formats/dataclass/serializers/writers/test_lxml.py +++ b/tests/formats/dataclass/serializers/writers/test_lxml.py @@ -41,6 +41,14 @@ class LxmlEventWriterTests(TestCase): self.assertEqual('<?xml version="1.1" encoding="US-ASCII"?>', xml_declaration) + def test_declaration_disabled(self): + self.serializer.config.xml_declaration = False + actual = self.serializer.render(books, {None: "urn:books"}) + expected = fixtures_dir.joinpath("books/books_default_ns.xml").read_text() + xml_declaration, expected = expected.split("\n", 1) + + self.assertEqual(expected, actual) + def test_pretty_print_false(self): self.serializer.config.pretty_print = False actual = self.serializer.render(books) diff --git a/tests/formats/dataclass/serializers/writers/test_native.py b/tests/formats/dataclass/serializers/writers/test_native.py index 30185195..309a9a78 100644 --- a/tests/formats/dataclass/serializers/writers/test_native.py +++ b/tests/formats/dataclass/serializers/writers/test_native.py @@ -35,6 +35,14 @@ class XmlEventWriterTests(TestCase): self.assertEqual('<?xml version="1.1" encoding="US-ASCII"?>', xml_declaration) + def test_declaration_disabled(self): + self.serializer.config.xml_declaration = False + actual = self.serializer.render(books, {None: "urn:books"}) + expected = fixtures_dir.joinpath("books/books_default_ns.xml").read_text() + xml_declaration, expected = expected.split("\n", 1) + + self.assertEqual(expected, actual) + def test_pretty_print_false(self): self.serializer.config.pretty_print = False actual = self.serializer.render(books)
SerializerConfig introduced 2 issues Hello, the 1st issue is that the constructor interface for XmlSerializer changed.
```python
XmlSerializer(encoding=None)
```
no longer works. This is a smaller issue, as I can change my code to support the new library (but it's still a change that isn't backwards compatible). The 2nd issue is that the XmlWriter no longer supports encoding=None. This is a blocker, as I have a REST API that will not accept a payload with an encoding.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/formats/dataclass/serializers/writers/test_lxml.py::LxmlEventWriterTests::test_declaration_disabled", "tests/formats/dataclass/serializers/writers/test_native.py::XmlEventWriterTests::test_declaration_disabled" ]
[ "tests/formats/dataclass/serializers/writers/test_lxml.py::LxmlEventWriterTests::test_encoding", "tests/formats/dataclass/serializers/writers/test_lxml.py::LxmlEventWriterTests::test_pretty_print_false", "tests/formats/dataclass/serializers/writers/test_lxml.py::LxmlEventWriterTests::test_render", "tests/formats/dataclass/serializers/writers/test_lxml.py::LxmlEventWriterTests::test_render_with_default_namespace_prefix", "tests/formats/dataclass/serializers/writers/test_lxml.py::LxmlEventWriterTests::test_render_with_provided_namespaces", "tests/formats/dataclass/serializers/writers/test_native.py::XmlEventWriterTests::test_encoding", "tests/formats/dataclass/serializers/writers/test_native.py::XmlEventWriterTests::test_pretty_print_false", "tests/formats/dataclass/serializers/writers/test_native.py::XmlEventWriterTests::test_render", "tests/formats/dataclass/serializers/writers/test_native.py::XmlEventWriterTests::test_render_with_default_namespace_prefix", "tests/formats/dataclass/serializers/writers/test_native.py::XmlEventWriterTests::test_render_with_provided_namespaces" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-12-17T22:53:17Z"
mit
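A usage sketch for the flag introduced by the patch above; `books` stands in for any xsdata-generated model instance and is an assumption of this snippet:

```python
from xsdata.formats.dataclass.serializers import XmlSerializer
from xsdata.formats.dataclass.serializers.config import SerializerConfig

# xml_declaration=True stays the default, so existing output is unchanged.
config = SerializerConfig(pretty_print=True, xml_declaration=False)
serializer = XmlSerializer(config=config)
# serializer.render(books) now starts at the root element, with no
# <?xml version="1.0" encoding="UTF-8"?> line.
```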
tefra__xsdata-364
diff --git a/xsdata/formats/dataclass/serializers/xml.py b/xsdata/formats/dataclass/serializers/xml.py index 487ce89d..7a0e88f7 100644 --- a/xsdata/formats/dataclass/serializers/xml.py +++ b/xsdata/formats/dataclass/serializers/xml.py @@ -161,8 +161,8 @@ class XmlSerializer(AbstractSerializer): def write_tokens(self, value: Any, var: XmlVar, namespace: NoneStr) -> Generator: """Produce an events stream for the given tokens list or list of tokens lists.""" - if value: - if isinstance(value[0], list): + if value or var.nillable: + if value and isinstance(value[0], list): for val in value: yield from self.write_element(val, var, namespace) else:
tefra/xsdata
ff428d68c61f254609465012cc62c49f3b88e575
diff --git a/tests/formats/dataclass/serializers/test_xml.py b/tests/formats/dataclass/serializers/test_xml.py index 10f1a9fe..7f065a9e 100644 --- a/tests/formats/dataclass/serializers/test_xml.py +++ b/tests/formats/dataclass/serializers/test_xml.py @@ -167,6 +167,17 @@ class XmlSerializerTests(TestCase): result = self.serializer.write_value([[1, 2, 3], [4, 5, 6]], var, "xsdata") self.assertEqual(expected, list(result)) + var = XmlElement(qname="a", name="a", tokens=True, nillable=True) + expected = [ + (XmlWriterEvent.START, "a"), + (XmlWriterEvent.ATTR, QNames.XSI_NIL, "true"), + (XmlWriterEvent.DATA, []), + (XmlWriterEvent.END, "a"), + ] + + result = self.serializer.write_value([], var, "xsdata") + self.assertEqual(expected, list(result)) + def test_write_any_type_with_primitive(self): var = XmlWildcard(qname="a", name="a") expected = [(XmlWriterEvent.DATA, "str")]
XmlSerializer render empty nillable tokens lists
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_tokens" ]
[ "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_next_attribute", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_next_value", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_render_mixed_content", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_any_type_with_generic_object", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_any_type_with_primitive", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_any_type_with_primitive_element", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_when_no_matching_choice_exists", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_derived_dataclass", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_derived_primitive_value", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_generic_object", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_raw_value", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_data", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_dataclass", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_dataclass_can_overwrite_params", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_dataclass_with_no_dataclass", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element_with_any_type_var", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element_with_any_type_var_ignore_xs_string", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element_with_nillable_true", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_mixed_content", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_object_with_derived_element", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_value_with_list_value", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_value_with_unhandled_xml_var", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_xsi_type", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_xsi_type_with_derived_class", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_xsi_type_with_illegal_derived_class" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2020-12-20T15:52:19Z"
mit
tehmaze__ansi-34
diff --git a/ansi/colour/rgb.py b/ansi/colour/rgb.py index 13fbc02..6eb1e29 100644 --- a/ansi/colour/rgb.py +++ b/ansi/colour/rgb.py @@ -56,7 +56,7 @@ def rgb16(r: int, g: int, b: int) -> str: return rgb_reduce(r, g, b, 16) -def rgb256(r: int, g: int, b: int) -> str: +def rgb256(r: int, g: int, b: int, bg: bool=False) -> str: """ Convert an RGB colour to 256 colour ANSI graphics. @@ -79,4 +79,4 @@ def rgb256(r: int, g: int, b: int) -> str: colour = sum([16] + [int(6 * float(val) / 256) * mod for val, mod in ((r, 36), (g, 6), (b, 1))]) - return sequence('m', fields=3)(38, 5, colour) + return sequence('m', fields=3)(38 if not bg else 48, 5, colour)
tehmaze/ansi
f80c14bcee8a9c4b4aecbd88c24ba4818c64db77
diff --git a/test_ansi.py b/test_ansi.py index a12d704..a15a75e 100644 --- a/test_ansi.py +++ b/test_ansi.py @@ -40,6 +40,11 @@ def test_rgb() -> None: msg = (rgb256(0xff, 0x80, 0x00), 'hello world', reset) assert ''.join(map(str, msg)) == '\x1b[38;5;214mhello world\x1b[0m' +def test_rgb_bg() -> None: + from ansi.colour.rgb import rgb256 + from ansi.colour.fx import reset + msg = (rgb256(0xff, 0x80, 0x00, bg=True), 'hello world', reset) + assert ''.join(map(str, msg)) == '\x1b[48;5;214mhello world\x1b[0m' def test_osc() -> None: from ansi import osc
RGB background color support hi! By adding a `bg` switch to `rgb256(r, g, b, bg=True)` you could return
```
if bg:
    return sequence('m', fields=3)(48, 5, colour)
else:
    return sequence('m', fields=3)(38, 5, colour)
```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test_ansi.py::test_rgb_bg" ]
[ "test_ansi.py::test_import", "test_ansi.py::test_import_color", "test_ansi.py::test_fg_bg", "test_ansi.py::test_sugar", "test_ansi.py::test_rgb", "test_ansi.py::test_osc", "test_ansi.py::test_iterm", "test_ansi.py::test_add", "test_ansi.py::test_add_to_string", "test_ansi.py::test_add_other", "test_ansi.py::test_empty", "test_ansi.py::test_erase" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2024-01-22T00:42:39Z"
mit
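Usage sketch for the new `bg` flag, mirroring the added test: the only change is the SGR parameter 48 (background) in place of 38 (foreground).

```python
from ansi.colour.fx import reset
from ansi.colour.rgb import rgb256

msg = (rgb256(0xff, 0x80, 0x00, bg=True), 'hello world', reset)
assert ''.join(map(str, msg)) == '\x1b[48;5;214mhello world\x1b[0m'
```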
templateflow__python-client-59
diff --git a/.circleci/config.yml b/.circleci/config.yml index 115902f..f9399f7 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -45,7 +45,7 @@ jobs: source /tmp/venv/bin/activate pip install -U pip pip install -r /tmp/src/templateflow/requirements.txt - pip install "datalad ~= 0.11.8" doi2bib + pip install "datalad ~= 0.11.8" "doi2bib < 0.4" pip install "setuptools>=42.0" "setuptools_scm[toml] >= 3.4" twine codecov - run: diff --git a/setup.cfg b/setup.cfg index 16b5cc9..1012368 100644 --- a/setup.cfg +++ b/setup.cfg @@ -49,7 +49,7 @@ exclude = [options.extras_require] citations = - doi2bib + doi2bib < 0.4.0 datalad = datalad ~= 0.12.0 doc = diff --git a/templateflow/conf/__init__.py b/templateflow/conf/__init__.py index f4b387b..9d29cf2 100644 --- a/templateflow/conf/__init__.py +++ b/templateflow/conf/__init__.py @@ -45,11 +45,19 @@ please set the TEMPLATEFLOW_HOME environment variable.\ def update(local=False, overwrite=True, silent=False): """Update an existing DataLad or S3 home.""" if TF_USE_DATALAD and _update_datalad(): - return True - - from ._s3 import update as _update_s3 + success = True + else: + from ._s3 import update as _update_s3 + success = _update_s3(TF_HOME, local=local, overwrite=overwrite, silent=silent) - return _update_s3(TF_HOME, local=local, overwrite=overwrite, silent=silent) + # update Layout only if necessary + if success and TF_LAYOUT is not None: + init_layout() + # ensure the api uses the updated layout + import importlib + from .. import api + importlib.reload(api) + return success def setup_home(force=False): @@ -76,9 +84,12 @@ def _update_datalad(): TF_LAYOUT = None -try: + + +def init_layout(): from .bids import Layout + global TF_LAYOUT TF_LAYOUT = Layout( TF_HOME, validate=False, @@ -92,5 +103,9 @@ try: "scripts", ], ) + + +try: + init_layout() except ImportError: pass diff --git a/templateflow/conf/_s3.py b/templateflow/conf/_s3.py index 9e20cbe..4051ce8 100644 --- a/templateflow/conf/_s3.py +++ b/templateflow/conf/_s3.py @@ -27,7 +27,7 @@ def _get_skeleton_file(): import requests try: - r = requests.get(TF_SKEL_URL(release="master", ext="md5", allow_redirects=True)) + r = requests.get(TF_SKEL_URL(release="master", ext="md5"), allow_redirects=True) except requests.exceptions.ConnectionError: return @@ -35,7 +35,7 @@ def _get_skeleton_file(): return if r.content.decode().split()[0] != TF_SKEL_MD5: - r = requests.get(TF_SKEL_URL(release="master", ext="zip", allow_redirects=True)) + r = requests.get(TF_SKEL_URL(release="master", ext="zip"), allow_redirects=True) if r.ok: from os import close
templateflow/python-client
1c473dfa23eac7eed8cefd1ce9fad5ad52331a2e
diff --git a/templateflow/tests/test_conf.py b/templateflow/tests/test_conf.py new file mode 100644 index 0000000..c2d95f2 --- /dev/null +++ b/templateflow/tests/test_conf.py @@ -0,0 +1,27 @@ +from pathlib import Path +import pytest +from .. import conf, api + + [email protected](conf.TF_USE_DATALAD, reason="S3 only") +def test_update_s3(tmp_path): + conf.TF_HOME = tmp_path / 'templateflow' + conf.TF_HOME.mkdir(exist_ok=True) + + # replace TF_SKEL_URL with the path of a legacy skeleton + _skel_url = conf._s3.TF_SKEL_URL + conf._s3.TF_SKEL_URL = ( + "https://github.com/templateflow/python-client/raw/0.5.0/" + "templateflow/conf/templateflow-skel.{ext}".format + ) + # initialize templateflow home, making sure to pull the legacy skeleton + conf.update(local=False) + # ensure we can grab a file + assert Path(api.get('MNI152NLin2009cAsym', resolution=2, desc='brain', suffix='mask')).exists() + # and ensure we can't fetch one that doesn't yet exist + assert not api.get('Fischer344', hemi='L', desc='brain', suffix='mask') + + # refresh the skeleton using the most recent skeleton + conf._s3.TF_SKEL_URL = _skel_url + conf.update(local=True, overwrite=True) + assert Path(api.get('Fischer344', hemi='L', desc='brain', suffix='mask')).exists()
TemplateFlow should re-index the BIDS layout after update Currently, when `TEMPLATEFLOW_HOME` has been updated in the same python session, the index of the archive remains out-of-date.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "templateflow/tests/test_conf.py::test_update_s3" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-08-17T21:56:30Z"
apache-2.0
tempoCollaboration__OQuPy-71
diff --git a/oqupy/mps_mpo.py b/oqupy/mps_mpo.py index 29e080c..6a24a85 100644 --- a/oqupy/mps_mpo.py +++ b/oqupy/mps_mpo.py @@ -198,14 +198,13 @@ def compute_nn_gate( nn_gate: NnGate Nearest neighbor gate. """ - # exponentiate and transpose such that - # axis 0 is the input and axis 1 is the output leg of the propagator. - propagator = linalg.expm(dt*liouvillian).T + # exponentiate the liouvillian to become a propagator + propagator = linalg.expm(dt*liouvillian) # split leg 0 and leg 1 each into left and right. - propagator.shape = [hs_dim_l**2, - hs_dim_r**2, - hs_dim_l**2, - hs_dim_r**2] + propagator.shape = [hs_dim_l**2, # left output + hs_dim_r**2, # right output + hs_dim_l**2, # left input + hs_dim_r**2] # right input temp = np.swapaxes(propagator, 1, 2) temp = temp.reshape([hs_dim_l**2 * hs_dim_l**2, hs_dim_r**2 * hs_dim_r**2]) @@ -217,7 +216,9 @@ def compute_nn_gate( sqrt_s = np.sqrt(s) u_sqrt_s = u * sqrt_s sqrt_s_vh =(sqrt_s * vh.T).T + # left tensor with legs: left output, left input, bond tensor_l = u_sqrt_s.reshape(hs_dim_l**2, hs_dim_l**2, chi) + # right tensor with legs: bond, right output, right input tensor_r = sqrt_s_vh.reshape(chi, hs_dim_r**2, hs_dim_r**2) return NnGate(site=site, tensors=(tensor_l, tensor_r)) diff --git a/oqupy/operators.py b/oqupy/operators.py index 2566dcf..8dde09e 100644 --- a/oqupy/operators.py +++ b/oqupy/operators.py @@ -197,7 +197,7 @@ def cross_left_right_super( operator_2_l: ndarray, operator_2_r: ndarray) -> ndarray: """ - Construct anit-commutator of cross term (acting on two Hilbert spaces). + Contruct map from rho to [(op1l x op2l) rho (op1r x op2r)]. """ op1l_op1r = np.kron(operator_1_l, operator_1_r.T) op2l_op2r = np.kron(operator_2_l, operator_2_r.T) diff --git a/oqupy/system.py b/oqupy/system.py index a7d11c1..184a68b 100644 --- a/oqupy/system.py +++ b/oqupy/system.py @@ -429,7 +429,8 @@ class SystemChain(BaseAPIClass): self._nn_liouvillians = [] for hs_dim_l, hs_dim_r in zip(self._hs_dims[:-1], self._hs_dims[1:]): self._nn_liouvillians.append( - np.zeros((hs_dim_l**4, hs_dim_r**4), dtype=NpDtype)) + np.zeros((hs_dim_l**2 * hs_dim_r**2, hs_dim_l**2 * hs_dim_r**2), + dtype=NpDtype)) super().__init__(name, description) @@ -496,7 +497,7 @@ class SystemChain(BaseAPIClass): liouvillian: ndarray Liouvillian acting on the single site. """ - raise NotImplementedError() + self._site_liouvillians[site] += np.array(liouvillian, dtype=NpDtype) def add_site_dissipation( self, @@ -525,12 +526,13 @@ class SystemChain(BaseAPIClass): gamma: float Optional multiplicative factor :math:`\gamma`. """ - op = lindblad_operator + op = np.array(lindblad_operator, dtype=NpDtype) op_dagger = op.conjugate().T self._site_liouvillians[site] += \ - gamma * (opr.left_right_super(op, op_dagger) + gamma * (opr.left_right_super(op, op_dagger) \ - 0.5 * opr.acommutator(np.dot(op_dagger, op))) + def add_nn_hamiltonian( self, site: int, @@ -585,7 +587,7 @@ class SystemChain(BaseAPIClass): liouvillian_l_r: ndarray Liouvillian acting on sites :math:`n` and :math:`n+1`. """ - self._nn_liouvillians[site] += liouvillian_l_r + self._nn_liouvillians[site] += np.array(liouvillian_l_r, dtype=NpDtype) def add_nn_dissipation( self,
tempoCollaboration/OQuPy
b3355f4c8a6e7001275e78c287d52f6d25c96e53
diff --git a/tests/coverage/pt_tebd_test.py b/tests/coverage/pt_tebd_test.py index 80e47fc..b2fcc54 100644 --- a/tests/coverage/pt_tebd_test.py +++ b/tests/coverage/pt_tebd_test.py @@ -17,11 +17,12 @@ Tests for the time_evovling_mpo.pt_tebd module. import pytest +import numpy as np import oqupy up_dm = oqupy.operators.spin_dm("z+") -system_chain = oqupy.SystemChain(hilbert_space_dimensions=[2,2]) -initial_augmented_mps = oqupy.AugmentedMPS([up_dm, up_dm]) +system_chain = oqupy.SystemChain(hilbert_space_dimensions=[2,3]) +initial_augmented_mps = oqupy.AugmentedMPS([up_dm, np.diag([1,0,0])]) pt_tebd_params = oqupy.PtTebdParameters(dt=0.2, order=2, epsrel=1.0e-4) def test_get_augmented_mps(): @@ -32,8 +33,10 @@ def test_get_augmented_mps(): parameters=pt_tebd_params) augmented_mps = pt_tebd.get_augmented_mps() - assert augmented_mps.gammas[1].shape == (1,4,1,1) + assert augmented_mps.gammas[0].shape == (1,4,1,1) + assert augmented_mps.gammas[1].shape == (1,9,1,1) - pt_tebd.compute(end_step=1, progress_type='silent') + pt_tebd.compute(end_step=2, progress_type='silent') augmented_mps = pt_tebd.get_augmented_mps() - assert augmented_mps.gammas[1].shape == (1,4,1,1) + assert augmented_mps.gammas[0].shape == (1,4,1,1) + assert augmented_mps.gammas[1].shape == (1,9,1,1) diff --git a/tests/physics/example_H_test.py b/tests/physics/example_H_test.py new file mode 100644 index 0000000..2688cc7 --- /dev/null +++ b/tests/physics/example_H_test.py @@ -0,0 +1,101 @@ +# Copyright 2020 The TEMPO Collaboration +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Tests for the time_evovling_mpo.backends.tensor_network modules. 
+""" +import sys +sys.path.insert(0,'.') + +import pytest +import numpy as np + +import oqupy + +# ----------------------------------------------------------------------------- +# -- Test F: Test Lindblad dissipation for PT-TEBD --------------------------- + +# --- Parameters -------------------------------------------------------------- + +# -- time steps -- +dt = 0.1 +num_steps = 10 + +# -- bath -- +alpha = 0.3 +omega_cutoff = 3.0 +temperature = 0.8 +pt_dkmax = 10 +pt_epsrel = 1.0e-6 + +# -- chain -- +N = 5 +Omega = 1.0 +eta = 0.3 +Delta = 1.2 +h = np.array( + [[1.0, 0.0, 0.0], + [2.0, 0.0, 0.0], + [3.0, 0.0, 0.0], + [4.0, 0.0, 0.0], + [5.0, 0.0, 0.0]]) * np.pi / 10 +J = np.array([[Delta, 1.0+eta, 1.0-eta]]*(N-1)) +up_dm = oqupy.operators.spin_dm("z+") +down_dm = oqupy.operators.spin_dm("z-") +tebd_order = 2 +tebd_epsrel = 1.0e-7 + + +def test_pt_tebd_site_dissipation_H1(): + # -- initial state -- + initial_augmented_mps = oqupy.AugmentedMPS([up_dm, down_dm, down_dm]) + + # -- add single site dissipation -- + system_chain = oqupy.SystemChain(hilbert_space_dimensions=[2,2,2]) + # lowering operator on site 0: + system_chain.add_site_dissipation(0,[[0,0],[1,0]]) + # identity cross raising operator on sites 1 and 2: + system_chain.add_nn_dissipation(1,np.identity(2),[[0,1],[0,0]]) + + # -- PT-TEBD parameters -- + pt_tebd_params = oqupy.PtTebdParameters( + dt=dt, + order=tebd_order, + epsrel=tebd_epsrel) + + num_steps = int(1.0/pt_tebd_params.dt) + + pt_tebd = oqupy.PtTebd( + initial_augmented_mps=initial_augmented_mps, + system_chain=system_chain, + process_tensors=[None]*3, + parameters=pt_tebd_params, + dynamics_sites=[0,1,2], + chain_control=None) + + r = pt_tebd.compute(num_steps, progress_type="silent") + + np.testing.assert_almost_equal( + r['dynamics'][0].states[-1], + [[np.exp(-1),0],[0,1-np.exp(-1)]], + decimal=4) + np.testing.assert_almost_equal( + r['dynamics'][1].states[-1], + [[0,0],[0,1]], + decimal=4) + np.testing.assert_almost_equal( + r['dynamics'][2].states[-1], + [[1-np.exp(-1),0],[0,np.exp(-1)]], + decimal=4) + +# -----------------------------------------------------------------------------
Bug in SystemChain.add_site_dissipation() Adding a Markovian Lindblad dissipator to a system chain seems to go wrong, as one can see by the decay of the norm in the following example with dissipation on the first of a two site chain (without any coherent evolution): ```python3 import oqupy import numpy as np import matplotlib.pyplot as plt sigma_z = oqupy.operators.sigma("z") sigma_minus = oqupy.operators.sigma("-") up_dm = oqupy.operators.spin_dm("z+") down_dm = oqupy.operators.spin_dm("z-") initial_augmented_mps = oqupy.AugmentedMPS([up_dm, down_dm]) system_chain = oqupy.SystemChain(hilbert_space_dimensions=[2,2]) system_chain.add_site_dissipation(0, sigma_minus, gamma=0.2) pt_tebd_params = oqupy.PtTebdParameters( dt=0.1, order=2, epsrel=1.0e-6) pt_tebd = oqupy.PtTebd( initial_augmented_mps=initial_augmented_mps, system_chain=system_chain, process_tensors=[None, None, None, None, None], parameters=pt_tebd_params, dynamics_sites=[0, 1], chain_control=None) num_steps = 20 results = pt_tebd.compute(num_steps, progress_type="bar") plt.plot(results['norm'].real) ``` The norm drops below 0.7 in 20 time steps, which seems to be a real bug and not just a numerical error.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/coverage/pt_tebd_test.py::test_get_augmented_mps", "tests/physics/example_H_test.py::test_pt_tebd_site_dissipation_H1" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2022-08-07T14:28:26Z"
apache-2.0
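The new physics test pins the fixed propagator to the analytic Lindblad solution: with a rate-1 lowering operator acting for `t = 1`, the excited-state population decays to `e^-1`. The end states it asserts are:

```python
import numpy as np

t = 1.0  # the test runs int(1.0 / dt) steps
# Site 0 (rate-1 lowering operator, initially excited):
site0_final = np.array([[np.exp(-t), 0.0], [0.0, 1.0 - np.exp(-t)]])
# Site 2 (identity (x) raising term on the 1-2 pair, initially ground):
site2_final = np.array([[1.0 - np.exp(-t), 0.0], [0.0, np.exp(-t)]])
```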
tempoCollaboration__OQuPy-74
diff --git a/.gitignore b/.gitignore index 73b69be..9057a01 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ # TEMPO file formats *.tempoDynamics *.processTensor +*.hdf5 # local development local_dev/ diff --git a/oqupy/process_tensor.py b/oqupy/process_tensor.py index b80569f..d37f05e 100644 --- a/oqupy/process_tensor.py +++ b/oqupy/process_tensor.py @@ -533,10 +533,10 @@ class FileProcessTensor(BaseProcessTensor): # transforms transform_in = np.array(self._f["transform_in"]) - if transform_in == 0.0: + if np.allclose(transform_in, np.array([0.0])): transform_in = None transform_out = np.array(self._f["transform_out"]) - if transform_out == 0.0: + if np.allclose(transform_out, np.array([0.0])): transform_out = None # initial tensor and mpo/cap/lam tensors
tempoCollaboration/OQuPy
be1c8bc45db3411aaebc213c2b2f52cb8d52e55f
diff --git a/tests/coverage/process_tensor_test.py b/tests/coverage/process_tensor_test.py index 32879e5..89acbef 100644 --- a/tests/coverage/process_tensor_test.py +++ b/tests/coverage/process_tensor_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 The TEMPO Collaboration +# Copyright 2022 The oqupy Collaboration # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,43 +17,57 @@ Tests for the time_evovling_mpo.process_tensor module. import pytest -import oqupy as tempo +import oqupy - -TEMP_FILE = "tests/data/temp.processTensor" +TEMP_FILE_1 = "./tests/data/temp1.hdf5" +TEMP_FILE_2 = "./tests/data/temp2.hdf5" # -- prepare a process tensor ------------------------------------------------- -system = tempo.System(tempo.operators.sigma("x")) -initial_state = tempo.operators.spin_dm("z+") -correlations = tempo.PowerLawSD( +system = oqupy.System(oqupy.operators.sigma("x")) +initial_state = oqupy.operators.spin_dm("z+") +correlations = oqupy.PowerLawSD( alpha=0.3, zeta=1.0, cutoff=5.0, cutoff_type="exponential", temperature=0.2, name="ohmic") -bath = tempo.Bath( - 0.5*tempo.operators.sigma("z"), +bath1 = oqupy.Bath( + 0.5*oqupy.operators.sigma("z"), + correlations, + name="phonon bath") +bath2 = oqupy.Bath( + 0.5*oqupy.operators.sigma("x"), correlations, name="phonon bath") -tempo_params = tempo.TempoParameters( +tempo_params = oqupy.TempoParameters( dt=0.1, dkmax=5, epsrel=10**(-5)) -pt = tempo.pt_tempo_compute( - bath, +pt1 = oqupy.pt_tempo_compute( + bath1, + start_time=0.0, + end_time=0.3, + parameters=tempo_params) +pt2 = oqupy.pt_tempo_compute( + bath2, start_time=0.0, - end_time=1.0, + end_time=0.3, parameters=tempo_params) -pt.export(TEMP_FILE, overwrite=True) -del pt +pt1.export(TEMP_FILE_1, overwrite=True) +pt2.export(TEMP_FILE_2, overwrite=True) +del pt1 +del pt2 def test_process_tensor(): - pt = tempo.import_process_tensor(TEMP_FILE, process_tensor_type="simple") - str(pt) - pt.get_bond_dimensions() + pt1 = oqupy.import_process_tensor(TEMP_FILE_1, process_tensor_type="simple") + str(pt1) + pt1.get_bond_dimensions() with pytest.raises(OSError): - pt.export(TEMP_FILE) + pt1.export(TEMP_FILE_1) + pt2 = oqupy.import_process_tensor(TEMP_FILE_2, process_tensor_type="file") + str(pt2) + pt2.get_bond_dimensions()
Bug when loading a process tensor from file I've found a bug when loading a process tensor with non-diagonal coupling from a file. Here is a (minimal) failing example:
```python
import oqupy

TEMP_FILE = "./temp-process-tensor.hdf5"

system = oqupy.System(oqupy.operators.sigma("x"))
initial_state = oqupy.operators.spin_dm("z+")
correlations = oqupy.PowerLawSD(
    alpha=0.3,
    zeta=1.0,
    cutoff=5.0,
    cutoff_type="exponential",
    temperature=0.2,
    name="ohmic")
bath = oqupy.Bath(
    0.5*oqupy.operators.sigma("x"),
    correlations)
tempo_params = oqupy.TempoParameters(
    dt=0.1,
    dkmax=5,
    epsrel=10**(-5))
pt = oqupy.pt_tempo_compute(
    bath,
    start_time=0.0,
    end_time=0.3,
    process_tensor_file=TEMP_FILE,
    overwrite=True,
    parameters=tempo_params)
del pt
pt = oqupy.import_process_tensor(TEMP_FILE, process_tensor_type="file")
```
This is the output:
```
--> PT-TEMPO computation: 100.0% 2 of 2 [########################################] 00:00:00
Elapsed time: 0.0s
Traceback (most recent call last):
  File "./examples/fail.py", line 33, in <module>
    pt = oqupy.import_process_tensor(TEMP_FILE, process_tensor_type="file")
  File "./oqupy/process_tensor.py", line 729, in import_process_tensor
    pt_file = FileProcessTensor(mode="read", filename=filename)
  File "./oqupy/process_tensor.py", line 457, in __init__
    dictionary = self._read_file(filename)
  File "./oqupy/process_tensor.py", line 536, in _read_file
    if transform_in == 0.0:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/coverage/process_tensor_test.py::test_process_tensor" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2022-10-07T08:09:54Z"
apache-2.0
tenable__pyTenable-664
diff --git a/tenable/io/v3/base/iterators/explore_iterator.py b/tenable/io/v3/base/iterators/explore_iterator.py index 737e03d..20c5dc2 100644 --- a/tenable/io/v3/base/iterators/explore_iterator.py +++ b/tenable/io/v3/base/iterators/explore_iterator.py @@ -69,7 +69,9 @@ class SearchIterator(ExploreIterator): Process the API Response ''' body = response.json() - pagination = body.get('pagination', {}) + # Pagination value can be null in JSON response, we need to make sure + # a dict is returned + pagination = body.get('pagination') or {} self.page = body[self._resource] self.total = pagination.get('total') self._next_token = pagination.get('next')
tenable/pyTenable
08ce435d75dfa953931582ed2806ca289b7c5fe0
diff --git a/tests/io/test_search_iterator_v3.py b/tests/io/test_search_iterator_v3.py index 8c063cd..8b896a5 100644 --- a/tests/io/test_search_iterator_v3.py +++ b/tests/io/test_search_iterator_v3.py @@ -2,6 +2,7 @@ Testing the search iterators for V3 endpoints ''' import pytest +import json from tenable.io.v3.base.iterators.explore_iterator import SearchIterator @@ -21,7 +22,6 @@ ASSET_DATA = [ ] - @pytest.mark.vcr() def test_search_iterator_v3(api): ''' @@ -61,3 +61,22 @@ def test_search_iterator_v3(api): with pytest.raises(StopIteration): next(search_iterator) + + +def test_search_iterator_v3_null_pagination(api): + ''' + Test for null pagination in SearchIterator._process_response + ''' + search_iterator = SearchIterator( + api=api + ) + class TempJson: + def json(self): + return json.loads(json.dumps({'findings': [{'id': 'abcdef'}], + 'pagination': None + }) + ) + search_iterator._resource = "findings" + search_iterator._process_response(TempJson()) + assert search_iterator.total == None + assert search_iterator._next_token == None
Crash when pagination value is null in JSON response **Describe the bug** The following program crashed, since sometimes the server returns JSON data which is not correctly handled by the `SearchIterator` class in `explore_iterator.py`. **To Reproduce** Steps to reproduce the behavior: I'm calling the API endpoint (`/api/v3/findings/vulnerabilities/webapp/search`) on cloud.tenable.com, using the `tio.v3.explore.findings.search_webapp` function with some arguments:
```json
{
  "limit": 200,
  "filter": {
    "and": [
      {
        "operator": "eq",
        "value": "redacted",
        "property": "asset_id"
      }
    ]
  },
  "fields": [
    "asset",
    "definition.id",
    "definition.vpr.score",
    "definition.exploitability_ease"
  ],
  "next": "**redacted**"
}
```
The server responds (sometimes):
```json
{"findings":[**redacted**],"pagination":null}
```
The Python code is:
```python
def _process_response(self, response: Response) -> None:
    '''
    Process the API Response
    '''
    body = response.json()
    pagination = body.get('pagination', {})
    self.page = body[self._resource]
    self.total = pagination.get('total')
    self._next_token = pagination.get('next')
```
The key `pagination` actually exists in the JSON response, so the `pagination` variable will be `None` and not a dict. The crash happens at line 8 (line 72 in the actual code).
```
Traceback (most recent call last):
***redacted***
  File "/redacted/lib/python3.10/site-packages/restfly/iterator.py", line 114, in __next__
    return self.next()  # noqa: PLE1102
  File "/redacted/lib/python3.10/site-packages/restfly/iterator.py", line 140, in next
    self._get_page()
  File "/redacted/lib/python3.10/site-packages/tenable/io/v3/base/iterators/explore_iterator.py", line 59, in _get_page
    self._process_response(resp)
  File "/redacted/lib/python3.10/site-packages/tenable/io/v3/base/iterators/explore_iterator.py", line 74, in _process_response
    self.total = pagination.get('total')
AttributeError: 'NoneType' object has no attribute 'get'
```
**Expected behavior** The code should not crash. **System Information (please complete the following information):** - OS: Linux - Architecture 64bit
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/io/test_search_iterator_v3.py::test_search_iterator_v3_null_pagination" ]
[]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2023-01-06T15:40:08Z"
mit
tensorflow__agents-913
diff --git a/tf_agents/environments/batched_py_environment.py b/tf_agents/environments/batched_py_environment.py index 99fbc2b3..b5041374 100644 --- a/tf_agents/environments/batched_py_environment.py +++ b/tf_agents/environments/batched_py_environment.py @@ -26,7 +26,7 @@ from __future__ import print_function from multiprocessing import dummy as mp_threads from multiprocessing import pool # pylint: enable=line-too-long -from typing import Sequence, Optional +from typing import Any, Optional, Sequence import gin import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import @@ -182,6 +182,21 @@ class BatchedPyEnvironment(py_environment.PyEnvironment): ) return nest_utils.stack_nested_arrays(time_steps) + def seed(self, seed: types.Seed) -> Any: + """Seeds the environment.""" + return self._execute(lambda env: env.seed(seed), self._envs) + + def get_state(self) -> Any: + """Returns the `state` of the environment.""" + return self._execute(lambda env: env.get_state(), self._envs) + + def set_state(self, state: Sequence[Any]) -> None: + """Restores the environment to a given `state`.""" + self._execute( + lambda env_state: env_state[0].set_state(env_state[1]), + zip(self._envs, state) + ) + def render(self, mode="rgb_array") -> Optional[types.NestedArray]: if self._num_envs == 1: img = self._envs[0].render(mode)
tensorflow/agents
27b851f4daad092345f07cd2525115a8f3ed5224
diff --git a/tf_agents/environments/batched_py_environment_test.py b/tf_agents/environments/batched_py_environment_test.py index 9cdf9637..3fc6e4a4 100644 --- a/tf_agents/environments/batched_py_environment_test.py +++ b/tf_agents/environments/batched_py_environment_test.py @@ -38,10 +38,21 @@ class GymWrapperEnvironmentMock(random_py_environment.RandomPyEnvironment): def __init__(self, *args, **kwargs): super(GymWrapperEnvironmentMock, self).__init__(*args, **kwargs) self._info = {} + self._state = {'seed': 0} def get_info(self): return self._info + def seed(self, seed): + self._state['seed'] = seed + return super(GymWrapperEnvironmentMock, self).seed(seed) + + def get_state(self): + return self._state + + def set_state(self, state): + self._state = state + def _step(self, action): self._info['last_action'] = action return super(GymWrapperEnvironmentMock, self)._step(action) @@ -116,6 +127,32 @@ class BatchedPyEnvironmentTest(tf.test.TestCase, parameterized.TestCase): self.assertAllEqual(info['last_action'], action) gym_env.close() + @parameterized.parameters(*COMMON_PARAMETERS) + def test_seed_gym_env(self, multithreading): + num_envs = 5 + gym_env = self._make_batched_mock_gym_py_environment( + multithreading, num_envs=num_envs + ) + + gym_env.seed(42) + + actual_seeds = [state['seed'] for state in gym_env.get_state()] + self.assertEqual(actual_seeds, [42] * num_envs) + gym_env.close() + + @parameterized.parameters(*COMMON_PARAMETERS) + def test_state_gym_env(self, multithreading): + num_envs = 5 + gym_env = self._make_batched_mock_gym_py_environment( + multithreading, num_envs=num_envs + ) + state = [{'value': i * 10} for i in range(num_envs)] + + gym_env.set_state(state) + + self.assertEqual(gym_env.get_state(), state) + gym_env.close() + @parameterized.parameters(*COMMON_PARAMETERS) def test_step(self, multithreading): num_envs = 5
PyEnvironment Methods Incompatible with TF

The docstring for `tf_py_environment.__getattr__` indicates that certain PyEnvironment methods might be incompatible with TF.

```python
def __getattr__(self, name: Text) -> Any:
    """Enables access attributes of the wrapped PyEnvironment.

    Use with caution since methods of the PyEnvironment can be incompatible
    with TF.

    Args:
      name: Name of the attribute.

    Returns:
      The attribute.
    """
    if name in self.__dict__:
      return getattr(self, name)
    return getattr(self._env, name)
```

What makes a PyEnvironment method incompatible with TensorFlow? I ran across this issue when trying to call `.seed` on the `tf_py_environment`. I implemented a `.seed` function for my subclass of `py_environment`, but calling `.seed` on the wrapper doesn't lead to the `.seed` function of the subclass being called. Perhaps this is intentional?
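The patch above addresses this by giving `BatchedPyEnvironment` explicit `seed`/`get_state`/`set_state` methods. A minimal standalone sketch (hypothetical classes, not the tf_agents API) of why attribute delegation alone is not enough once environments are batched:

```python
# A wrapper that only forwards unknown attributes can at best reach one wrapped
# environment; a batched wrapper needs an explicit method to fan the call out.
class SubEnv:
    def __init__(self):
        self.stored_seed = None

    def seed(self, seed):
        self.stored_seed = seed


class NaiveWrapper:
    """Delegates unknown attributes to the first wrapped env only."""
    def __init__(self, envs):
        self._envs = envs

    def __getattr__(self, name):
        return getattr(self._envs[0], name)


class BatchedWrapper(NaiveWrapper):
    """Explicit override so seeding reaches every wrapped env."""
    def seed(self, seed):
        return [env.seed(seed) for env in self._envs]


envs = [SubEnv() for _ in range(3)]
NaiveWrapper(envs).seed(42)    # only envs[0] is seeded
BatchedWrapper(envs).seed(42)  # every sub-environment is seeded
assert all(env.stored_seed == 42 for env in envs)
```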
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_seed_gym_env0", "tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_seed_gym_env1", "tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_state_gym_env0", "tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_state_gym_env1" ]
[ "tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_close_no_hang_after_init0", "tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_close_no_hang_after_init1", "tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_get_info_gym_env0", "tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_get_info_gym_env1", "tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_get_specs0", "tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_get_specs1", "tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_step0", "tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_step1", "tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_unstack_actions", "tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_unstack_nested_actions" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2024-01-21T14:46:51Z"
apache-2.0
tensorly__tensorly-170
diff --git a/tensorly/backend/numpy_backend.py b/tensorly/backend/numpy_backend.py index 45aead6..2688ae9 100644 --- a/tensorly/backend/numpy_backend.py +++ b/tensorly/backend/numpy_backend.py @@ -53,7 +53,6 @@ class NumpyBackend(Backend): return np.sum(np.abs(tensor)**order, axis=axis)**(1 / order) def kr(self, matrices, weights=None, mask=None): - if mask is None: mask = 1 n_columns = matrices[0].shape[1] n_factors = len(matrices) @@ -66,7 +65,8 @@ class NumpyBackend(Backend): if weights is not None: matrices = [m if i else m*self.reshape(weights, (1, -1)) for i, m in enumerate(matrices)] - return np.einsum(operation, *matrices).reshape((-1, n_columns))*mask + m = mask.reshape((-1, 1)) if mask is not None else 1 + return np.einsum(operation, *matrices).reshape((-1, n_columns))*m @property def SVD_FUNS(self):
tensorly/tensorly
b39c65182d24874e154de2d1563d4882086f0641
diff --git a/tensorly/decomposition/tests/test_candecomp_parafac.py b/tensorly/decomposition/tests/test_candecomp_parafac.py index e537743..dcd8c45 100644 --- a/tensorly/decomposition/tests/test_candecomp_parafac.py +++ b/tensorly/decomposition/tests/test_candecomp_parafac.py @@ -75,6 +75,21 @@ def test_parafac(): error = T.norm(tensor - rec, 2)/T.norm(tensor) assert_(error < tol) + +def test_masked_parafac(): + """Test for the masked CANDECOMP-PARAFAC decomposition. + This checks that a mask of 1's is identical to the unmasked case. + """ + rng = check_random_state(1234) + tensor = T.tensor(rng.random_sample((3, 3, 3))) + mask = T.tensor(np.ones((3, 3, 3))) + + mask_fact = parafac(tensor, rank=2, mask=mask) + fact = parafac(tensor, rank=2) + diff = kruskal_to_tensor(mask_fact) - kruskal_to_tensor(fact) + assert_(T.norm(diff) < 0.01, 'norm 2 of reconstruction higher than 0.01') + + def test_non_negative_parafac(): """Test for non-negative PARAFAC
Masked CP decomposition returns error in numpy backend kr

Hi, like milanlanlan in [Handling missing data in decomposition #4](https://github.com/tensorly/tensorly/issues/4#issuecomment-557899752), I am trying to decompose a tensor using the CP decomposition with missing values. Unfortunately, I also receive this error message:

```
      5 mask_parafac[score_sparse > 0] = 1
      6 print(mask_parafac)
      7 rec_score_tensor = parafac(score_sparse, rank=4, mask = mask)
      8
      9 print(rec_score_tensor)

tensorly/decomposition/candecomp_parafac.py in parafac(tensor, rank, n_iter_max, init, svd, normalize_factors, tol, orthogonalise, random_state, verbose, return_errors, non_negative, mask)
    183
    184         if mask is not None:
    185             tensor = tensor*mask + tl.kruskal_to_tensor((None, factors), mask=1-mask)
    186
    187         mttkrp = unfolding_dot_khatri_rao(tensor, (None, factors), mode)

tensorly/kruskal_tensor.py in kruskal_to_tensor(kruskal_tensor, mask)
    186                                T.transpose(khatri_rao(factors, skip_matrix=0)))
    187     else:
    188         full_tensor = T.sum(khatri_rao([factors[0]*weights]+factors[1:], mask=mask), axis=1)
    190     return fold(full_tensor, 0, shape)

tensorly/tenalg/_khatri_rao.py in khatri_rao(matrices, weights, skip_matrix, reverse, mask)
     96     # Note: we do NOT use .reverse() which would reverse matrices even outside this function
     97
     98     return T.kr(matrices, weights=weights, mask=mask)

tensorly/backend/__init__.py in inner(*args, **kwargs)
    158
    159     def inner(*args, **kwargs):
    160         return _get_backend_method(name)(*args, **kwargs)
    161
    162     # We don't use `functools.wraps` here because some of the dispatched

tensorly/backend/numpy_backend.py in kr(self, matrices, weights, mask)
     67             matrices = [m if i else m*self.reshape(weights, (1, -1)) for i, m in enumerate(matrices)]
     68
     69         return np.einsum(operation, *matrices).reshape((-1, n_columns))*mask
     70         #tensor = np.einsum(operation, *matrices).reshape((-1, n_columns))
     71         #return tensor* mask.reshape(tensor.shape)

ValueError: operands could not be broadcast together with shapes (80,4) (5,4,4)
```

As the number of elements in the two arrays is not equal, it seems a simple reshape alone will not solve the problem.
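To make the shape mismatch concrete, here is a small standalone NumPy sketch, mirroring the `(5, 4, 4)` tensor and rank 4 from the traceback: the flat Khatri-Rao result cannot be multiplied by a tensor-shaped mask, but flattening the mask to a column, as the patch does, makes the broadcast line up.

```python
import numpy as np

# Factors of a rank-4 decomposition of a (5, 4, 4) tensor, as in the report.
A, B, C = np.random.rand(5, 4), np.random.rand(4, 4), np.random.rand(4, 4)
mask = np.random.randint(0, 2, size=(5, 4, 4))

# Khatri-Rao (column-wise Kronecker) product of all factors: shape (80, 4).
kr = np.einsum('ir,jr,kr->ijkr', A, B, C).reshape(-1, 4)

# kr * mask raises: operands could not be broadcast together (80,4) vs (5,4,4).
# Flattening the mask to a column lines it up with the rows of kr instead:
masked_kr = kr * mask.reshape(-1, 1)  # (80, 4) * (80, 1) broadcasts row-wise
print(masked_kr.shape)                # (80, 4)
```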
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tensorly/decomposition/tests/test_candecomp_parafac.py::test_masked_parafac" ]
[ "tensorly/decomposition/tests/test_candecomp_parafac.py::test_parafac", "tensorly/decomposition/tests/test_candecomp_parafac.py::test_non_negative_parafac", "tensorly/decomposition/tests/test_candecomp_parafac.py::test_sample_khatri_rao", "tensorly/decomposition/tests/test_candecomp_parafac.py::test_randomised_parafac" ]
{ "failed_lite_validators": [ "has_issue_reference" ], "has_test_patch": true, "is_lite": false }
"2020-05-19T01:28:26Z"
bsd-3-clause
tensorly__tensorly-221
diff --git a/tensorly/decomposition/__init__.py b/tensorly/decomposition/__init__.py index d43985d..9b4f4c0 100644 --- a/tensorly/decomposition/__init__.py +++ b/tensorly/decomposition/__init__.py @@ -8,7 +8,7 @@ from ._nn_cp import non_negative_parafac from ._tucker import tucker, partial_tucker, non_negative_tucker, Tucker from .robust_decomposition import robust_pca from ._tt import TensorTrain, tensor_train, tensor_train_matrix -from .parafac2 import parafac2, Parafac2 +from ._parafac2 import parafac2, Parafac2 from ._symmetric_cp import symmetric_parafac_power_iteration, symmetric_power_iteration, SymmetricCP from ._cp_power import parafac_power_iteration, power_iteration, CPPower diff --git a/tensorly/decomposition/parafac2.py b/tensorly/decomposition/_parafac2.py similarity index 76% rename from tensorly/decomposition/parafac2.py rename to tensorly/decomposition/_parafac2.py index 82fed36..eb0cadf 100644 --- a/tensorly/decomposition/parafac2.py +++ b/tensorly/decomposition/_parafac2.py @@ -285,95 +285,94 @@ def parafac2(tensor_slices, rank, n_iter_max=100, init='random', svd='numpy_svd' class Parafac2(DecompositionMixin): + r"""PARAFAC2 decomposition [1]_ of a third order tensor via alternating least squares (ALS) - def __init__(self, rank, n_iter_max=100, init='random', svd='numpy_svd', normalize_factors=False, - tol=1e-8, random_state=None, verbose=False, n_iter_parafac=5): - r"""PARAFAC2 decomposition [1]_ of a third order tensor via alternating least squares (ALS) + Computes a rank-`rank` PARAFAC2 decomposition of the third-order tensor defined by + `tensor_slices`. The decomposition is on the form :math:`(A [B_i] C)` such that the + i-th frontal slice, :math:`X_i`, of :math:`X` is given by - Computes a rank-`rank` PARAFAC2 decomposition of the third-order tensor defined by - `tensor_slices`. The decomposition is on the form :math:`(A [B_i] C)` such that the - i-th frontal slice, :math:`X_i`, of :math:`X` is given by + .. math:: + + X_i = B_i diag(a_i) C^T, + + where :math:`diag(a_i)` is the diagonal matrix whose nonzero entries are equal to + the :math:`i`-th row of the :math:`I \times R` factor matrix :math:`A`, :math:`B_i` + is a :math:`J_i \times R` factor matrix such that the cross product matrix :math:`B_{i_1}^T B_{i_1}` + is constant for all :math:`i`, and :math:`C` is a :math:`K \times R` factor matrix. + To compute this decomposition, we reformulate the expression for :math:`B_i` such that - .. math:: - - X_i = B_i diag(a_i) C^T, - - where :math:`diag(a_i)` is the diagonal matrix whose nonzero entries are equal to - the :math:`i`-th row of the :math:`I \times R` factor matrix :math:`A`, :math:`B_i` - is a :math:`J_i \times R` factor matrix such that the cross product matrix :math:`B_{i_1}^T B_{i_1}` - is constant for all :math:`i`, and :math:`C` is a :math:`K \times R` factor matrix. - To compute this decomposition, we reformulate the expression for :math:`B_i` such that + .. math:: - .. math:: + B_i = P_i B, - B_i = P_i B, + where :math:`P_i` is a :math:`J_i \times R` orthogonal matrix and :math:`B` is a + :math:`R \times R` matrix. - where :math:`P_i` is a :math:`J_i \times R` orthogonal matrix and :math:`B` is a - :math:`R \times R` matrix. + An alternative formulation of the PARAFAC2 decomposition is that the tensor element + :math:`X_{ijk}` is given by - An alternative formulation of the PARAFAC2 decomposition is that the tensor element - :math:`X_{ijk}` is given by + .. math:: - .. 
math:: + X_{ijk} = \sum_{r=1}^R A_{ir} B_{ijr} C_{kr}, + + with the same constraints hold for :math:`B_i` as above. + - X_{ijk} = \sum_{r=1}^R A_{ir} B_{ijr} C_{kr}, - - with the same constraints hold for :math:`B_i` as above. + Parameters + ---------- + tensor_slices : ndarray or list of ndarrays + Either a third order tensor or a list of second order tensors that may have different number of rows. + Note that the second mode factor matrices are allowed to change over the first mode, not the + third mode as some other implementations use (see note below). + rank : int + Number of components. + n_iter_max : int + Maximum number of iteration + init : {'svd', 'random', CPTensor, Parafac2Tensor} + Type of factor matrix initialization. See `initialize_factors`. + svd : str, default is 'numpy_svd' + function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS + normalize_factors : bool (optional) + If True, aggregate the weights of each factor in a 1D-tensor + of shape (rank, ), which will contain the norms of the factors. Note that + there may be some inaccuracies in the component weights. + tol : float, optional + (Default: 1e-8) Relative reconstruction error tolerance. The + algorithm is considered to have found the global minimum when the + reconstruction error is less than `tol`. + random_state : {None, int, np.random.RandomState} + verbose : int, optional + Level of verbosity + n_iter_parafac: int, optional + Number of PARAFAC iterations to perform for each PARAFAC2 iteration + + Returns + ------- + Parafac2Tensor : (weight, factors, projection_matrices) + * weights : 1D array of shape (rank, ) + all ones if normalize_factors is False (default), + weights of the (normalized) factors otherwise + * factors : List of factors of the CP decomposition element `i` is of shape + (tensor.shape[i], rank) + * projection_matrices : List of projection matrices used to create evolving + factors. + References + ---------- + .. [1] Kiers, H.A.L., ten Berge, J.M.F. and Bro, R. (1999), + PARAFAC2—Part I. A direct fitting algorithm for the PARAFAC2 model. + J. Chemometrics, 13: 275-294. - Parameters - ---------- - tensor_slices : ndarray or list of ndarrays - Either a third order tensor or a list of second order tensors that may have different number of rows. - Note that the second mode factor matrices are allowed to change over the first mode, not the - third mode as some other implementations use (see note below). - rank : int - Number of components. - n_iter_max : int - Maximum number of iteration - init : {'svd', 'random', CPTensor, Parafac2Tensor} - Type of factor matrix initialization. See `initialize_factors`. - svd : str, default is 'numpy_svd' - function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS - normalize_factors : bool (optional) - If True, aggregate the weights of each factor in a 1D-tensor - of shape (rank, ), which will contain the norms of the factors. Note that - there may be some inaccuracies in the component weights. - tol : float, optional - (Default: 1e-8) Relative reconstruction error tolerance. The - algorithm is considered to have found the global minimum when the - reconstruction error is less than `tol`. 
- random_state : {None, int, np.random.RandomState} - verbose : int, optional - Level of verbosity - n_iter_parafac: int, optional - Number of PARAFAC iterations to perform for each PARAFAC2 iteration - - Returns - ------- - Parafac2Tensor : (weight, factors, projection_matrices) - * weights : 1D array of shape (rank, ) - all ones if normalize_factors is False (default), - weights of the (normalized) factors otherwise - * factors : List of factors of the CP decomposition element `i` is of shape - (tensor.shape[i], rank) - * projection_matrices : List of projection matrices used to create evolving - factors. - - References - ---------- - .. [1] Kiers, H.A.L., ten Berge, J.M.F. and Bro, R. (1999), - PARAFAC2—Part I. A direct fitting algorithm for the PARAFAC2 model. - J. Chemometrics, 13: 275-294. - - Notes - ----- - This formulation of the PARAFAC2 decomposition is slightly different from the one in [1]_. - The difference lies in that here, the second mode changes over the first mode, whereas in - [1]_, the second mode changes over the third mode. We made this change since that means - that the function accept both lists of matrices and a single nd-array as input without - any reordering of the modes. - """ + Notes + ----- + This formulation of the PARAFAC2 decomposition is slightly different from the one in [1]_. + The difference lies in that here, the second mode changes over the first mode, whereas in + [1]_, the second mode changes over the third mode. We made this change since that means + that the function accept both lists of matrices and a single nd-array as input without + any reordering of the modes. + """ + def __init__(self, rank, n_iter_max=100, init='random', svd='numpy_svd', normalize_factors=False, + tol=1e-8, random_state=None, verbose=False, n_iter_parafac=5): self.rank = rank self.n_iter_max=n_iter_max self.init=init
tensorly/tensorly
e51fd09f3bacc77641aef76128330c479c5ce7d7
diff --git a/tensorly/decomposition/tests/test_parafac2.py b/tensorly/decomposition/tests/test_parafac2.py index db925e1..8c6f982 100644 --- a/tensorly/decomposition/tests/test_parafac2.py +++ b/tensorly/decomposition/tests/test_parafac2.py @@ -7,7 +7,7 @@ import tensorly as tl from ...random import check_random_state, random_parafac2 from ... import backend as T from ...testing import assert_array_equal, assert_ -from ..parafac2 import parafac2, initialize_decomposition, _pad_by_zeros +from .._parafac2 import parafac2, initialize_decomposition, _pad_by_zeros from ...parafac2_tensor import parafac2_to_tensor, parafac2_to_slices
Enhancement: Improve naming consistency Almost all decomposition implementations are stored in a file with the name `_{{decomposition_name}}`. The exceptions are PARAFAC2 and Robust PCA. Should we rename these modules to `_parafac2.py` and `_robust_decomposition.py`, or is there a reason for these exceptions?
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tensorly/decomposition/tests/test_parafac2.py::test_parafac2[True-random]", "tensorly/decomposition/tests/test_parafac2.py::test_parafac2[True-svd]", "tensorly/decomposition/tests/test_parafac2.py::test_parafac2[False-random]", "tensorly/decomposition/tests/test_parafac2.py::test_parafac2[False-svd]", "tensorly/decomposition/tests/test_parafac2.py::test_parafac2_slice_and_tensor_input", "tensorly/decomposition/tests/test_parafac2.py::test_parafac2_normalize_factors", "tensorly/decomposition/tests/test_parafac2.py::test_parafac2_init_valid", "tensorly/decomposition/tests/test_parafac2.py::test_parafac2_init_error", "tensorly/decomposition/tests/test_parafac2.py::test_parafac2_to_tensor", "tensorly/decomposition/tests/test_parafac2.py::test_pad_by_zeros" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2021-01-01T19:21:08Z"
bsd-3-clause
tensorly__tensorly-320
diff --git a/tensorly/backend/core.py b/tensorly/backend/core.py index ae9beb6..41c483a 100644 --- a/tensorly/backend/core.py +++ b/tensorly/backend/core.py @@ -1079,6 +1079,9 @@ class Backend(object): S = np.sqrt(np.clip(S, 0, None)) S = np.clip(S, np.finfo(S.dtype).eps, None) # To avoid divide by zero warning on next line V = np.dot(matrix.T.conj(), U * np.where(np.abs(S) <= np.finfo(S.dtype).eps, 0, 1/S)[None, :]) + U, S, V = U[:, ::-1], S[::-1], V[:, ::-1] + V, R = np.linalg.qr(V) + V = V * (2*(np.diag(R) >= 0) - 1) # we can't use np.sign because np.sign(0) == 0 else: S, V = scipy.sparse.linalg.eigsh( np.dot(matrix.T.conj(), matrix), k=n_eigenvecs, which='LM', v0=v0 @@ -1086,9 +1089,11 @@ class Backend(object): S = np.sqrt(np.clip(S, 0, None)) S = np.clip(S, np.finfo(S.dtype).eps, None) U = np.dot(matrix, V) * np.where(np.abs(S) <= np.finfo(S.dtype).eps, 0, 1/S)[None, :] + U, S, V = U[:, ::-1], S[::-1], V[:, ::-1] + U, R = np.linalg.qr(U) + U = U * (2*(np.diag(R) >= 0) - 1) # WARNING: here, V is still the transpose of what it should be - U, S, V = U[:, ::-1], S[::-1], V[:, ::-1] V = V.T.conj() if flip: diff --git a/tensorly/backend/pytorch_backend.py b/tensorly/backend/pytorch_backend.py index 267cda3..b96e7d7 100644 --- a/tensorly/backend/pytorch_backend.py +++ b/tensorly/backend/pytorch_backend.py @@ -148,6 +148,10 @@ class PyTorchBackend(Backend): def stack(arrays, axis=0): return torch.stack(arrays, dim=axis) + @staticmethod + def diag(tensor, k=0): + return torch.diag(tensor, diagonal=k) + @staticmethod def sort(tensor, axis, descending = False): if axis is None: @@ -212,7 +216,7 @@ class PyTorchBackend(Backend): for name in ['float64', 'float32', 'int64', 'int32', 'complex128', 'complex64', 'is_tensor', 'ones', 'zeros', 'any', 'trace', 'cumsum', 'tensordot', 'zeros_like', 'reshape', 'eye', 'max', 'min', 'prod', 'abs', 'matmul', - 'sqrt', 'sign', 'where', 'conj', 'diag', 'finfo', 'einsum', 'log2', 'sin', 'cos']: + 'sqrt', 'sign', 'where', 'conj', 'finfo', 'einsum', 'log2', 'sin', 'cos']: PyTorchBackend.register_method(name, getattr(torch, name)) diff --git a/tensorly/backend/tensorflow_backend.py b/tensorly/backend/tensorflow_backend.py index 3da3142..1d89fb5 100644 --- a/tensorly/backend/tensorflow_backend.py +++ b/tensorly/backend/tensorflow_backend.py @@ -191,7 +191,7 @@ _FUN_NAMES = [ (np.complex64, 'complex64'), (tf.ones, 'ones'), (tf.zeros, 'zeros'), - (tf.linalg.tensor_diag, 'diag'), + (tf.linalg.diag, 'diag'), (tf.zeros_like, 'zeros_like'), (tf.eye, 'eye'), (tf.reshape, 'reshape'),
tensorly/tensorly
5a6992ad14ec64d59a8b6d341ae14de98092687b
diff --git a/tensorly/tests/test_backend.py b/tensorly/tests/test_backend.py index 04818a3..c73c24f 100644 --- a/tensorly/tests/test_backend.py +++ b/tensorly/tests/test_backend.py @@ -161,7 +161,7 @@ def test_svd(): assert_(left_orthogonality_error <= tol_orthogonality, msg='Left eigenvecs not orthogonal for "{}" svd fun VS svd and backend="{}, for {} eigenenvecs, and size {}".'.format( name, tl.get_backend(), n, s)) - right_orthogonality_error = T.norm(T.dot(T.transpose(fU), fU) - T.eye(n)) + right_orthogonality_error = T.norm(T.dot(fV, T.transpose(fV)) - T.eye(n)) assert_(right_orthogonality_error <= tol_orthogonality, msg='Right eigenvecs not orthogonal for "{}" svd fun VS svd and backend="{}, for {} eigenenvecs, and size {}".'.format( name, tl.get_backend(), n, s)) @@ -180,6 +180,14 @@ def test_svd(): assert_(np.isfinite(T.to_numpy(U)).all(), msg="Left singular vectors are not finite") assert_(np.isfinite(T.to_numpy(V)).all(), msg="Right singular vectors are not finite") + # Test orthonormality when max_dim > n_eigenvecs > matrix_rank + matrix = tl.dot(tl.randn((4, 2), seed=1), tl.randn((2, 4), seed=12)) + U, S, V = tl.partial_svd(matrix, n_eigenvecs=3, random_state=0) + left_orthogonality_error = T.norm(T.dot(T.transpose(U), U) - T.eye(3)) + assert_(left_orthogonality_error <= tol_orthogonality) + right_orthogonality_error = T.norm(T.dot(V, T.transpose(V)) - T.eye(3)) + assert_(right_orthogonality_error <= tol_orthogonality) + # Test if partial_svd returns the same result for the same setting matrix = T.tensor(np.random.random((20, 5))) random_state = np.random.RandomState(0)
[BUG] singular vectors in `tl.partial_svd` are not orthonormal

**Describe the bug**
When `n_eigenvecs` is greater than the input matrix rank, the returned singular vectors are not orthonormal. However, the result is still a valid low-rank decomposition (A = USV).

```python3
import tensorly as tl
import numpy as np

np.random.seed(0)
a = tl.dot(tl.randn((4, 2)), tl.randn((2, 4)))
u, s, v = tl.partial_svd(a, n_eigenvecs=3)
print(tl.dot(u.T, u))
```
```
[[ 1. -0.  0.]
 [-0.  1.  0.]
 [ 0.  0.  0.]]
```
U^T U ≠ I ☹️.

---

To fix this bug we could use the Gram–Schmidt process and extend the singular vector basis corresponding to the zero singular values.
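A standalone NumPy sketch of both the failure mode and the QR-based re-orthonormalization the patch introduces (the sign correction mirrors the patch; `np.sign` is avoided there because `np.sign(0) == 0`):

```python
import numpy as np

np.random.seed(0)
a = np.random.randn(4, 2) @ np.random.randn(2, 4)  # rank-2 4x4 matrix

# Mimic partial_svd's eigendecomposition path: top-3 eigenpairs of a @ a.T give
# U and S, and V = a.T @ U / S with 1/S zeroed where S ~ 0, so the column of V
# belonging to the (near-)zero singular value comes out (near-)zero.
s2, u = np.linalg.eigh(a @ a.T)
u, s2 = u[:, ::-1][:, :3], s2[::-1][:3]
s = np.sqrt(np.clip(s2, 0, None))
s = np.clip(s, np.finfo(s.dtype).eps, None)
v = a.T @ (u * np.where(s <= np.finfo(s.dtype).eps, 0, 1 / s)[None, :])

print(np.allclose(v.T @ v, np.eye(3)))  # False: V is not orthonormal

# The fix: a QR pass completes the degenerate columns to an orthonormal basis,
# and the diagonal of R restores the signs QR may have flipped.
v, r = np.linalg.qr(v)
v = v * (2 * (np.diag(r) >= 0) - 1)

print(np.allclose(v.T @ v, np.eye(3)))  # True after the fix
```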
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tensorly/tests/test_backend.py::test_svd" ]
[ "tensorly/tests/test_backend.py::test_backend_and_tensorly_module_attributes", "tensorly/tests/test_backend.py::test_tensor_creation", "tensorly/tests/test_backend.py::test_svd_time", "tensorly/tests/test_backend.py::test_randomized_range_finder", "tensorly/tests/test_backend.py::test_shape", "tensorly/tests/test_backend.py::test_ndim", "tensorly/tests/test_backend.py::test_norm", "tensorly/tests/test_backend.py::test_clip", "tensorly/tests/test_backend.py::test_where", "tensorly/tests/test_backend.py::test_lstsq", "tensorly/tests/test_backend.py::test_qr", "tensorly/tests/test_backend.py::test_prod", "tensorly/tests/test_backend.py::test_index_update" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-08-28T06:51:58Z"
bsd-3-clause
tensorly__tensorly-455
diff --git a/doc/modules/api.rst b/doc/modules/api.rst index ad8c2a1..d6004d6 100644 --- a/doc/modules/api.rst +++ b/doc/modules/api.rst @@ -328,6 +328,10 @@ Note that these are currently experimental and may change in the future. Parafac2 SymmetricCP ConstrainedCP + TensorTrain + TensorRing + TensorTrainMatrix + Functions --------- @@ -352,6 +356,7 @@ Functions robust_pca tensor_train tensor_train_matrix + tensor_ring parafac2 constrained_parafac diff --git a/tensorly/backend/__init__.py b/tensorly/backend/__init__.py index 35b44f5..326dde0 100644 --- a/tensorly/backend/__init__.py +++ b/tensorly/backend/__init__.py @@ -105,6 +105,7 @@ class BackendManager(types.ModuleType): "asinh", "acosh", "atanh", + "partial_svd", ] _attributes = [ "int64", diff --git a/tensorly/backend/core.py b/tensorly/backend/core.py index fb43e7e..7b4a1e1 100644 --- a/tensorly/backend/core.py +++ b/tensorly/backend/core.py @@ -1262,3 +1262,11 @@ class Backend(object): def atanh(self, x): """Return the arctanh of x.""" return self.arctanh(x) + + def partial_svd(self, *args, **kwargs): + msg = ( + "partial_svd is no longer used. " + "Please use tensorly.tenalg.svd_interface instead, " + "it provides a unified interface to all available SVD implementations." + ) + raise NotImplementedError(msg) diff --git a/tensorly/decomposition/__init__.py b/tensorly/decomposition/__init__.py index 61332d3..7998a94 100644 --- a/tensorly/decomposition/__init__.py +++ b/tensorly/decomposition/__init__.py @@ -13,8 +13,9 @@ from ._tucker import ( Tucker, ) from .robust_decomposition import robust_pca -from ._tt import TensorTrain, tensor_train, tensor_train_matrix -from ._tr import tensor_ring +from ._tt import tensor_train, tensor_train_matrix +from ._tt import TensorTrain, TensorTrainMatrix +from ._tr import tensor_ring, TensorRing from ._parafac2 import parafac2, Parafac2 from ._symmetric_cp import ( symmetric_parafac_power_iteration, diff --git a/tensorly/decomposition/_tt.py b/tensorly/decomposition/_tt.py index 961c516..9929193 100644 --- a/tensorly/decomposition/_tt.py +++ b/tensorly/decomposition/_tt.py @@ -136,6 +136,40 @@ def tensor_train_matrix(tensor, rank, svd="truncated_svd", verbose=False): class TensorTrain(DecompositionMixin): + """Decompose a tensor into a matrix in tt-format + + Parameters + ---------- + tensor : tensorized matrix + if your input matrix is of size (4, 9) and your tensorized_shape (2, 2, 3, 3) + then tensor should be tl.reshape(matrix, (2, 2, 3, 3)) + rank : 'same', float or int tuple + - if 'same' creates a decomposition with the same number of parameters as `tensor` + - if float, creates a decomposition with `rank` x the number of parameters of `tensor` + - otherwise, the actual rank to be used, e.g. (1, rank_2, ..., 1) of size tensor.ndim//2. Note that boundary conditions dictate that the first rank = last rank = 1. 
+ svd : str, default is 'truncated_svd' + function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS + verbose : boolean, optional + level of verbosity + + Returns + ------- + tt_matrix + """ + + def __init__(self, rank, svd="truncated_svd", verbose=False): + self.rank = rank + self.svd = svd + self.verbose = verbose + + def fit_transform(self, tensor): + self.decomposition_ = tensor_train( + tensor, rank=self.rank, svd=self.svd, verbose=self.verbose + ) + return self.decomposition_ + + +class TensorTrainMatrix(DecompositionMixin): """TT decomposition via recursive SVD Decomposes `input_tensor` into a sequence of order-3 tensors (factors) @@ -169,7 +203,7 @@ class TensorTrain(DecompositionMixin): self.verbose = verbose def fit_transform(self, tensor): - self.decomposition_ = tensor_train( + self.decomposition_ = tensor_train_matrix( tensor, rank=self.rank, svd=self.svd, verbose=self.verbose ) return self.decomposition_
tensorly/tensorly
a0d58621349a0100fef6ada87ead5646ba37e5f4
diff --git a/tensorly/decomposition/tests/test_tt_decomposition.py b/tensorly/decomposition/tests/test_tt_decomposition.py index 48675da..1bf7322 100644 --- a/tensorly/decomposition/tests/test_tt_decomposition.py +++ b/tensorly/decomposition/tests/test_tt_decomposition.py @@ -1,6 +1,6 @@ import pytest import tensorly as tl -from .._tt import tensor_train, tensor_train_matrix, TensorTrain +from .._tt import tensor_train, tensor_train_matrix, TensorTrain, TensorTrainMatrix from ...tt_matrix import tt_matrix_to_tensor from ...random import random_tt from ...testing import ( @@ -89,10 +89,14 @@ def test_tensor_train(monkeypatch): # TODO: Remove once MXNet supports transpose for > 6th order tensors @skip_mxnet -def test_tensor_train_matrix(): +def test_tensor_train_matrix(monkeypatch): """Test for tensor_train_matrix decomposition""" tensor = random_tt((2, 2, 2, 3, 3, 3), rank=2, full=True) tt = tensor_train_matrix(tensor, 10) tt_rec = tt_matrix_to_tensor(tt) assert_array_almost_equal(tensor, tt_rec, decimal=4) + + assert_class_wrapper_correctly_passes_arguments( + monkeypatch, tensor_train_matrix, TensorTrainMatrix, ignore_args={}, rank=3 + )
Add a deprecation for tl.partial_svd to suggest tl.tenalg.svd_interface

The issue in the tests comes from the fact that we changed the SVD interface in #429; the best option would be to use `tensorly.tenalg.svd_interface`.

@aarmey perhaps we should deprecate tl.partial_svd or at least raise an error if users try to use it, to let them know they should switch to the new interface?

_Originally posted by @JeanKossaifi in https://github.com/tensorly/tensorly/issues/411#issuecomment-1217122493_
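The merged patch resolves this with a hard `NotImplementedError` stub (see above). For contrast, a softer shim is sketched below. This is a hypothetical variant, not what was merged, and the exact `svd_interface` call signature is an assumption; only the `tensorly.tenalg.svd_interface` import path is taken from the issue.

```python
import warnings

def partial_svd(matrix, n_eigenvecs=None, **kwargs):
    # Hypothetical transition shim: warn at the call site, then delegate to the
    # replacement interface for one release before removal.
    warnings.warn(
        "partial_svd is deprecated; use tensorly.tenalg.svd_interface instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    from tensorly.tenalg import svd_interface  # import path per the issue
    return svd_interface(matrix, n_eigenvecs=n_eigenvecs, **kwargs)
```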
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tensorly/decomposition/tests/test_tt_decomposition.py::test_tensor_train", "tensorly/decomposition/tests/test_tt_decomposition.py::test_tensor_train_matrix" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2022-10-31T20:54:30Z"
bsd-3-clause
terean-dspd__numbers-2-rus-3
diff --git a/num2rus/converter.py b/num2rus/converter.py index 89d38e1..212840d 100644 --- a/num2rus/converter.py +++ b/num2rus/converter.py @@ -131,7 +131,7 @@ NUMBERS = { 40: "сорок", 50: "пятьдесят", 60: "шестьдесят", - 70: "семьдесять", + 70: "семьдесят", 80: "восемьдесят", 90: "девяносто", }, diff --git a/num2rus/main.py b/num2rus/main.py index e586db5..6049e24 100644 --- a/num2rus/main.py +++ b/num2rus/main.py @@ -30,7 +30,7 @@ def chopper(num: int) -> Tuple[str, str]: marks them as `миллиард`, `миллион`, `тысяча`, `сотня`, `единица`, `десятка` """ - num_str = str(num) + num_str = str(num) # '20' while len(num_str) > 0: if 13 > len(num_str) >= 10: step = len(num_str) - 9 @@ -50,9 +50,12 @@ def chopper(num: int) -> Tuple[str, str]: if len(num_str) == 1: yield num_str, 'единица' break - if len(num_str) == 2: + if len(num_str) == 2 and num_str[0] == '1': yield num_str, 'десятка' break + else: + yield num_str, 'сотня' + break def decimal_parser(number_str: str, zero_on: bool = True) -> Tuple[str, str]: @@ -157,7 +160,6 @@ def converter(number: float, zero_on: bool = True) -> str: dec_str = "{0:.2f}".format(decimal - integet_part) decimal_part: str = dec_str.split('.')[1] result = '' - tl_g = '' for number, size in chopper(integet_part): string, tl, rub = main_parser(number, size) result += string + tl + ' ' @@ -169,4 +171,4 @@ def converter(number: float, zero_on: bool = True) -> str: if __name__ == "__main__": # num = input() - converter(34102) + converter(20)
terean-dspd/numbers-2-rus
a6ee2b20be1c34b3b306591e2fa6e1fb68771326
diff --git a/num2rus/tests.py b/num2rus/tests.py index d3ac079..02acd9b 100644 --- a/num2rus/tests.py +++ b/num2rus/tests.py @@ -27,6 +27,43 @@ class Testconverter(unittest.TestCase): result = converter(10, zero_on=False) self.assertEqual(result, 'десять рублей') + def test_20(self): + result = converter(20, zero_on=False) + self.assertEqual(result, 'двадцать рублей') + + def test_21(self): + result = converter(21, zero_on=False) + self.assertEqual(result, 'двадцать один рубль') + + def test_25(self): + result = converter(25, zero_on=False) + self.assertEqual(result, 'двадцать пять рублей') + + def test_30(self): + result = converter(30, zero_on=False) + self.assertEqual(result, 'тридцать рублей') + + def test_33(self): + result = converter(33, zero_on=False) + self.assertEqual(result, 'тридцать три рубля') + + def test_43(self): + result = converter(43, zero_on=False) + self.assertEqual(result, 'сорок три рубля') + + def test_50(self): + result = converter(50, zero_on=False) + self.assertEqual(result, 'пятьдесят рублей') + def test_75(self): + result = converter(75, zero_on=False) + self.assertEqual(result, 'семьдесят пять рублей') + + def test_90(self): + result = converter(90, zero_on=False) + self.assertEqual(result, 'девяносто рублей') + def test_99(self): + result = converter(99, zero_on=False) + self.assertEqual(result, 'девяносто девять рублей') def test_100(self): result = converter(100, zero_on=False) self.assertEqual(result, 'сто рублей') @@ -246,9 +283,6 @@ class TestconverterNonZeroKops(unittest.TestCase): result = converter(123.40) self.assertEqual(result, 'сто двадцать три рубля сорок копеек') - def test_1_z_123_40(self): - result = converter(123.40) - self.assertEqual(result, 'сто двадцать три рубля сорок копеек') def test_1_z_133_41(self): result = converter(133.41)
Conversion error for numbers from 20 to 99

![image](https://user-images.githubusercontent.com/31948979/73152887-0397e580-40eb-11ea-8544-d45a20a77119.png)
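The root cause, per the patch above, is in `chopper`: every two-digit chunk was tagged `'десятка'` (the teens parser, which only knows 10-19), so 20-99 never reached the tens-plus-units path. A simplified standalone sketch of the corrected routing (the tag names follow the library, but this is not the full implementation):

```python
# Simplified routing from num2rus.main.chopper after the fix: only '1x' chunks
# are teens; other two-digit chunks go through the 'сотня'-style parser that
# emits a tens word plus an optional units word.
def classify(num_str):
    if len(num_str) == 2 and num_str[0] == '1':
        return 'десятка'  # 10-19: one dictionary lookup
    if len(num_str) == 2:
        return 'сотня'    # 20-99: e.g. 'двадцать' + 'пять'
    return 'единица'      # single digits

assert classify('15') == 'десятка'
assert classify('20') == 'сотня'  # previously mis-tagged as 'десятка'
assert classify('7') == 'единица'
```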
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "num2rus/tests.py::Testconverter::test_20", "num2rus/tests.py::Testconverter::test_21", "num2rus/tests.py::Testconverter::test_25", "num2rus/tests.py::Testconverter::test_30", "num2rus/tests.py::Testconverter::test_33", "num2rus/tests.py::Testconverter::test_43", "num2rus/tests.py::Testconverter::test_50", "num2rus/tests.py::Testconverter::test_75", "num2rus/tests.py::Testconverter::test_90", "num2rus/tests.py::Testconverter::test_99" ]
[ "num2rus/tests.py::Testconverter::test_1", "num2rus/tests.py::Testconverter::test_10", "num2rus/tests.py::Testconverter::test_100", "num2rus/tests.py::Testconverter::test_100000", "num2rus/tests.py::Testconverter::test_100100", "num2rus/tests.py::Testconverter::test_100101", "num2rus/tests.py::Testconverter::test_100102", "num2rus/tests.py::Testconverter::test_100_101_102", "num2rus/tests.py::Testconverter::test_101", "num2rus/tests.py::Testconverter::test_101102", "num2rus/tests.py::Testconverter::test_101_101_102", "num2rus/tests.py::Testconverter::test_111", "num2rus/tests.py::Testconverter::test_11100", "num2rus/tests.py::Testconverter::test_1120", "num2rus/tests.py::Testconverter::test_115", "num2rus/tests.py::Testconverter::test_120", "num2rus/tests.py::Testconverter::test_1_100_101", "num2rus/tests.py::Testconverter::test_1_100_102", "num2rus/tests.py::Testconverter::test_1_101_102", "num2rus/tests.py::Testconverter::test_2", "num2rus/tests.py::Testconverter::test_3", "num2rus/tests.py::Testconverter::test_34000", "num2rus/tests.py::Testconverter::test_34102", "num2rus/tests.py::Testconverter::test_34103", "num2rus/tests.py::Testconverter::test_5120", "num2rus/tests.py::Testconverter::test_7", "num2rus/tests.py::TestconverterZeroKops::test_1", "num2rus/tests.py::TestconverterZeroKops::test_10", "num2rus/tests.py::TestconverterZeroKops::test_100", "num2rus/tests.py::TestconverterZeroKops::test_100000", "num2rus/tests.py::TestconverterZeroKops::test_100100", "num2rus/tests.py::TestconverterZeroKops::test_100101", "num2rus/tests.py::TestconverterZeroKops::test_100102", "num2rus/tests.py::TestconverterZeroKops::test_100_101_102", "num2rus/tests.py::TestconverterZeroKops::test_101", "num2rus/tests.py::TestconverterZeroKops::test_101102", "num2rus/tests.py::TestconverterZeroKops::test_101_101_102", "num2rus/tests.py::TestconverterZeroKops::test_111", "num2rus/tests.py::TestconverterZeroKops::test_11100", "num2rus/tests.py::TestconverterZeroKops::test_1120", "num2rus/tests.py::TestconverterZeroKops::test_115", "num2rus/tests.py::TestconverterZeroKops::test_120", "num2rus/tests.py::TestconverterZeroKops::test_1_100_101", "num2rus/tests.py::TestconverterZeroKops::test_1_100_102", "num2rus/tests.py::TestconverterZeroKops::test_1_101_102", "num2rus/tests.py::TestconverterZeroKops::test_2", "num2rus/tests.py::TestconverterZeroKops::test_3", "num2rus/tests.py::TestconverterZeroKops::test_34000", "num2rus/tests.py::TestconverterZeroKops::test_34102", "num2rus/tests.py::TestconverterZeroKops::test_34103", "num2rus/tests.py::TestconverterZeroKops::test_5120", "num2rus/tests.py::TestconverterZeroKops::test_7", "num2rus/tests.py::TestconverterNonZeroKops::test_10_z_21", "num2rus/tests.py::TestconverterNonZeroKops::test_1_z_01", "num2rus/tests.py::TestconverterNonZeroKops::test_1_z_10", "num2rus/tests.py::TestconverterNonZeroKops::test_1_z_123_40", "num2rus/tests.py::TestconverterNonZeroKops::test_1_z_131_40", "num2rus/tests.py::TestconverterNonZeroKops::test_1_z_133_41", "num2rus/tests.py::TestconverterNonZeroKops::test_2_z_02", "num2rus/tests.py::TestconverterNonZeroKops::test_3_z_07", "num2rus/tests.py::TestconverterNonZeroKops::test_3_z_08", "num2rus/tests.py::TestconverterNonZeroKops::test_7_z_11" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_media", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-01-29T19:32:57Z"
mit
tern-tools__tern-764
diff --git a/tern/analyze/common.py b/tern/analyze/common.py index df4889a..a9de792 100644 --- a/tern/analyze/common.py +++ b/tern/analyze/common.py @@ -31,9 +31,11 @@ logger = logging.getLogger(constants.logger_name) def get_shell_commands(shell_command_line): - '''Given a shell command line, get a list of Command objects''' + '''Given a shell command line, get a list of Command objects and report on + branch statements''' statements = general.split_command(shell_command_line) command_list = [] + branch_report = '' # traverse the statements, pick out the loop and commands. for stat in statements: if 'command' in stat: @@ -43,7 +45,13 @@ def get_shell_commands(shell_command_line): for st in loop_stat: if 'command' in st: command_list.append(Command(st['command'])) - return command_list + elif 'branch' in stat: + branch_report = branch_report + '\n'.join(stat['content']) + '\n\n' + if branch_report: + # add prefix + branch_report = '\nNon-deterministic branching statement: \n' + \ + branch_report + return command_list, branch_report def load_from_cache(layer, redo=False): @@ -478,7 +486,7 @@ def filter_install_commands(shell_command_line): 3. Return installed command objects, and messages for ignored commands and unrecognized commands''' report = '' - command_list = get_shell_commands(shell_command_line) + command_list, branch_report = get_shell_commands(shell_command_line) for command in command_list: command_lib.set_command_attrs(command) ignore_msgs, filter1 = remove_ignored_commands(command_list) @@ -487,7 +495,8 @@ def filter_install_commands(shell_command_line): report = report + formats.ignored + ignore_msgs if unrec_msgs: report = report + formats.unrecognized + unrec_msgs - + if branch_report: + report = report + branch_report return consolidate_commands(filter2), report
tern-tools/tern
044dc470ec5be8aacbc085a5ae307c608ff13255
diff --git a/tests/test_analyze_common.py b/tests/test_analyze_common.py index e40445c..82aba50 100644 --- a/tests/test_analyze_common.py +++ b/tests/test_analyze_common.py @@ -32,10 +32,18 @@ class TestAnalyzeCommon(unittest.TestCase): del self.test_dockerfile def testGetShellCommands(self): - command = common.get_shell_commands("yum install nfs-utils") + command, _ = common.get_shell_commands("yum install nfs-utils") self.assertEqual(type(command), list) self.assertEqual(len(command), 1) self.assertEqual(command[0].options, self.command1.options) + # test on branching command + branching_script = "if [ -z $var ]; then yum install nfs-utils; fi" + branch_command, report = common.get_shell_commands(branching_script) + self.assertEqual(type(branch_command), list) + # we will ignore branching command, so len should be 0 + self.assertEqual(len(branch_command), 0) + # and the report should not be None + self.assertTrue(report) def testLoadFromCache(self): '''Given a layer object, populate the given layer in case the cache isn't empty'''
Report RUN command statements that are non-deterministic like if and case statements

**Describe the Feature**
Currently, the report notices just say that if, case and for statements are unrecognizable. It would be nice to add notices for non-deterministic branching statements like if and case statements, since the status of a branch can only be known at build time.

**Implementation Changes**
The new shell script parser produces a dictionary that will identify if and case statements. We could just look at this object and extract the if and case statements to create notices for them.
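A standalone sketch of the reporting approach the patch takes, assuming (per the patch) that the shell parser yields statement dicts with a `'branch'` key and a `'content'` list for branching constructs:

```python
# Branch statements are not analyzed for packages; their text is collected
# verbatim into a notice so the report can flag the non-determinism.
def report_branches(statements):
    report = ''
    for stat in statements:
        if 'branch' in stat:
            report += '\n'.join(stat['content']) + '\n\n'
    if report:
        report = '\nNon-deterministic branching statement: \n' + report
    return report

statements = [
    {'command': 'yum install nfs-utils'},
    {'branch': True,
     'content': ['if [ -z $var ]; then', '  yum install vim', 'fi']},
]
print(report_branches(statements))
```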
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_analyze_common.py::TestAnalyzeCommon::testGetShellCommands" ]
[ "tests/test_analyze_common.py::TestAnalyzeCommon::testConsolidateCommandsWithDifferentCommands", "tests/test_analyze_common.py::TestAnalyzeCommon::testConsolidateCommandsWithSameCommands", "tests/test_analyze_common.py::TestAnalyzeCommon::testFilterInstallCommands", "tests/test_analyze_common.py::TestAnalyzeCommon::testGetInstalledPackageNamesWithInstallFlag", "tests/test_analyze_common.py::TestAnalyzeCommon::testGetInstalledPackageNamesWithRemoveFlag", "tests/test_analyze_common.py::TestAnalyzeCommon::testLoadFilesFromCache", "tests/test_analyze_common.py::TestAnalyzeCommon::testLoadFromCache", "tests/test_analyze_common.py::TestAnalyzeCommon::testLoadNoticesFromCache", "tests/test_analyze_common.py::TestAnalyzeCommon::testLoadPackagesFromCache", "tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveIgnoredCommandsWithIgnoreFlag", "tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveIgnoredCommandsWithoutIgnoreFlag", "tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveUnrecognizedCommandsWithFlag", "tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveUnrecognizedCommandsWithoutFlag", "tests/test_analyze_common.py::TestAnalyzeCommon::testSaveToCache", "tests/test_analyze_common.py::TestAnalyzeCommon::testUpdateMasterListWithPackages", "tests/test_analyze_common.py::TestAnalyzeCommon::testUpdateMasterListWithoutPackages" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-07-11T01:09:53Z"
bsd-2-clause
tern-tools__tern-768
diff --git a/tern/analyze/common.py b/tern/analyze/common.py index df4889a..e5835c9 100644 --- a/tern/analyze/common.py +++ b/tern/analyze/common.py @@ -31,9 +31,11 @@ logger = logging.getLogger(constants.logger_name) def get_shell_commands(shell_command_line): - '''Given a shell command line, get a list of Command objects''' + '''Given a shell command line, get a list of Command objects and report on + branch statements''' statements = general.split_command(shell_command_line) command_list = [] + branch_report = '' # traverse the statements, pick out the loop and commands. for stat in statements: if 'command' in stat: @@ -43,7 +45,13 @@ def get_shell_commands(shell_command_line): for st in loop_stat: if 'command' in st: command_list.append(Command(st['command'])) - return command_list + elif 'branch' in stat: + branch_report = branch_report + '\n'.join(stat['content']) + '\n\n' + if branch_report: + # add prefix + branch_report = '\nNon-deterministic branching statement: \n' + \ + branch_report + return command_list, branch_report def load_from_cache(layer, redo=False): @@ -201,7 +209,7 @@ def get_os_release(base_layer): return pretty_name.strip('"') -def collate_list_metadata(shell, listing): +def collate_list_metadata(shell, listing, work_dir): '''Given the shell and the listing for the package manager, collect metadata that gets returned as a list''' pkg_dict = {} @@ -212,7 +220,7 @@ def collate_list_metadata(shell, listing): return pkg_dict, msgs, warnings for item in command_lib.base_keys: if item in listing.keys(): - items, msg = command_lib.get_pkg_attr_list(shell, listing[item]) + items, msg = command_lib.get_pkg_attr_list(shell, listing[item], work_dir) msgs = msgs + msg pkg_dict.update({item: items}) else: @@ -290,7 +298,7 @@ def get_deb_package_licenses(deb_copyrights): return deb_licenses -def add_base_packages(image_layer, binary, shell): +def add_base_packages(image_layer, binary, shell, work_dir=None): '''Given the image layer, the binary to invoke and shell: 1. get the listing from the base.yml 2. Invoke any commands against the base layer @@ -313,7 +321,7 @@ def add_base_packages(image_layer, binary, shell): image_layer.origins.add_notice_to_origins( origin_layer, Notice(snippet_msg, 'info')) # get all the packages in the base layer - pkg_dict, invoke_msg, warnings = collate_list_metadata(shell, listing) + pkg_dict, invoke_msg, warnings = collate_list_metadata(shell, listing, work_dir) if listing.get("pkg_format") == "deb": pkg_dict["pkg_licenses"] = get_deb_package_licenses( @@ -338,7 +346,7 @@ def add_base_packages(image_layer, binary, shell): listing_key=binary), 'error')) -def fill_package_metadata(pkg_obj, pkg_listing, shell): +def fill_package_metadata(pkg_obj, pkg_listing, shell, work_dir): '''Given a Package object and the Package listing from the command library, fill in the attribute value returned from looking up the data and methods of the package listing. 
@@ -351,7 +359,7 @@ def fill_package_metadata(pkg_obj, pkg_listing, shell): pkg_listing, 'version') if version_listing: version_list, invoke_msg = command_lib.get_pkg_attr_list( - shell, version_listing, package_name=pkg_obj.name) + shell, version_listing, work_dir, package_name=pkg_obj.name) if version_list: pkg_obj.version = version_list[0] else: @@ -365,7 +373,7 @@ def fill_package_metadata(pkg_obj, pkg_listing, shell): pkg_listing, 'license') if license_listing: license_list, invoke_msg = command_lib.get_pkg_attr_list( - shell, license_listing, package_name=pkg_obj.name) + shell, license_listing, work_dir, package_name=pkg_obj.name) if license_list: pkg_obj.license = license_list[0] else: @@ -379,7 +387,7 @@ def fill_package_metadata(pkg_obj, pkg_listing, shell): pkg_listing, 'proj_url') if url_listing: url_list, invoke_msg = command_lib.get_pkg_attr_list( - shell, url_listing, package_name=pkg_obj.name) + shell, url_listing, work_dir, package_name=pkg_obj.name) if url_list: pkg_obj.proj_url = url_list[0] else: @@ -390,7 +398,7 @@ def fill_package_metadata(pkg_obj, pkg_listing, shell): origin_str, Notice(listing_msg, 'warning')) -def get_package_dependencies(package_listing, package_name, shell): +def get_package_dependencies(package_listing, package_name, shell, work_dir=None): '''The package listing is the result of looking up the command name in the command library. Given this listing, the package name and the shell return a list of package dependency names''' @@ -398,7 +406,7 @@ def get_package_dependencies(package_listing, package_name, shell): package_listing, 'deps') if deps_listing: deps_list, invoke_msg = command_lib.get_pkg_attr_list( - shell, deps_listing, package_name=package_name) + shell, deps_listing, work_dir, package_name=package_name) if deps_list: return list(set(deps_list)), '' return [], invoke_msg @@ -457,6 +465,7 @@ def consolidate_commands(command_list): new_list.append(command_list.pop(0)) while command_list: + # match the first command with its following commands. first = command_list.pop(0) for _ in range(0, len(command_list)): second = command_list.pop(0) @@ -465,8 +474,11 @@ def consolidate_commands(command_list): new_list.append(second) else: if not first.merge(second): - command_list.append(first) - new_list.append(first) + # Unable to merge second, we should keep second command. + command_list.append(second) + # after trying to merge with all following commands, add first command + # to the new_dict. + new_list.append(first) return new_list @@ -478,7 +490,7 @@ def filter_install_commands(shell_command_line): 3. Return installed command objects, and messages for ignored commands and unrecognized commands''' report = '' - command_list = get_shell_commands(shell_command_line) + command_list, branch_report = get_shell_commands(shell_command_line) for command in command_list: command_lib.set_command_attrs(command) ignore_msgs, filter1 = remove_ignored_commands(command_list) @@ -487,11 +499,12 @@ def filter_install_commands(shell_command_line): report = report + formats.ignored + ignore_msgs if unrec_msgs: report = report + formats.unrecognized + unrec_msgs - + if branch_report: + report = report + branch_report return consolidate_commands(filter2), report -def add_snippet_packages(image_layer, command, pkg_listing, shell): +def add_snippet_packages(image_layer, command, pkg_listing, shell, work_dir): '''Given an image layer object, a command object, the package listing and the shell used to invoke commands, add package metadata to the layer object. 
We assume the filesystem is already mounted and ready @@ -524,7 +537,7 @@ def add_snippet_packages(image_layer, command, pkg_listing, shell): # get package metadata for each package name for pkg_name in unique_pkgs: pkg = Package(pkg_name) - fill_package_metadata(pkg, pkg_invoke, shell) + fill_package_metadata(pkg, pkg_invoke, shell, work_dir) image_layer.add_package(pkg) diff --git a/tern/analyze/docker/analyze.py b/tern/analyze/docker/analyze.py index 6c2bccc..b112edb 100644 --- a/tern/analyze/docker/analyze.py +++ b/tern/analyze/docker/analyze.py @@ -106,7 +106,12 @@ def analyze_subsequent_layers(image_obj, shell, master_list, redo, dfobj=None, dfile_lock=False): # get packages for subsequent layers curr_layer = 1 + work_dir = None while curr_layer < len(image_obj.layers): # pylint:disable=too-many-nested-blocks + # If workdir changes, update value accordingly + # so we can later execute base.yml commands from the workdir. + if image_obj.layers[curr_layer].get_layer_workdir() is not None: + work_dir = image_obj.layers[curr_layer].get_layer_workdir() # if there is no shell, try to see if it exists in the current layer if not shell: shell = common.get_shell(image_obj.layers[curr_layer]) @@ -126,7 +131,7 @@ def analyze_subsequent_layers(image_obj, shell, master_list, redo, dfobj=None, if isinstance(pkg_listing, str): try: common.add_base_packages( - image_obj.layers[curr_layer], pkg_listing, shell) + image_obj.layers[curr_layer], pkg_listing, shell, work_dir) except KeyboardInterrupt: logger.critical(errors.keyboard_interrupt) abort_analysis() @@ -134,7 +139,7 @@ def analyze_subsequent_layers(image_obj, shell, master_list, redo, dfobj=None, try: common.add_snippet_packages( image_obj.layers[curr_layer], command, pkg_listing, - shell) + shell, work_dir) except KeyboardInterrupt: logger.critical(errors.keyboard_interrupt) abort_analysis() diff --git a/tern/classes/image_layer.py b/tern/classes/image_layer.py index 46a74ad..484b802 100644 --- a/tern/classes/image_layer.py +++ b/tern/classes/image_layer.py @@ -327,3 +327,10 @@ class ImageLayer: file_data.set_checksum('sha256', attrs_tuple[2]) file_data.extattrs = attrs_tuple[0] self.add_file(file_data) + + def get_layer_workdir(self): + # If the layer is created by a WORKDIR command then return the workdir + match = re.search(r"\bWORKDIR\ (\/\w+)+\b", self.created_by) + if match: + return match.group().split()[1] + return None diff --git a/tern/command_lib/command_lib.py b/tern/command_lib/command_lib.py index c28c1b1..d3dc999 100644 --- a/tern/command_lib/command_lib.py +++ b/tern/command_lib/command_lib.py @@ -205,7 +205,7 @@ def invoke_in_rootfs(snippet_list, shell, package=''): raise -def get_pkg_attr_list(shell, attr_dict, package_name='', chroot=True, +def get_pkg_attr_list(shell, attr_dict, work_dir, package_name='', chroot=True, # pylint:disable=too-many-arguments override=''): '''The command library has package attributes listed like this: {invoke: {1: {container: [command1, command2]}, @@ -225,6 +225,9 @@ def get_pkg_attr_list(shell, attr_dict, package_name='', chroot=True, if 'container' in attr_dict['invoke'][step].keys(): snippet_list = attr_dict['invoke'][step]['container'] result = '' + # If work_dir exist cd into it + if work_dir is not None: + snippet_list.insert(0, 'cd ' + work_dir) # if we need to run in a chroot environment if chroot: try: diff --git a/tern/tools/verify_invoke.py b/tern/tools/verify_invoke.py index 95a6265..d016430 100644 --- a/tern/tools/verify_invoke.py +++ b/tern/tools/verify_invoke.py @@ -28,6 +28,15 @@ 
def look_up_lib(keys): return subd +def get_workdir(image_obj): + # get the workdir from the image config where the commands will be executed + config = image_obj.get_image_config(image_obj.get_image_manifest()) + workdir = config['config']['WorkingDir'] + if workdir == '': + return None + return workdir + + if __name__ == '__main__': parser = argparse.ArgumentParser( description=''' @@ -78,8 +87,9 @@ if __name__ == '__main__': info_dict = look_up_lib(args.keys) # try to invoke the commands try: + work_dir = get_workdir(image_obj) result = command_lib.get_pkg_attr_list( - args.shell, info_dict, args.package) + args.shell, info_dict, work_dir, args.package) print('Output list: ' + ' '.join(result[0])) print('Error messages: ' + result[1]) print('Number of elements: ' + str(len(result[0])))
tern-tools/tern
044dc470ec5be8aacbc085a5ae307c608ff13255
diff --git a/tests/test_analyze_common.py b/tests/test_analyze_common.py index e40445c..82aba50 100644 --- a/tests/test_analyze_common.py +++ b/tests/test_analyze_common.py @@ -32,10 +32,18 @@ class TestAnalyzeCommon(unittest.TestCase): del self.test_dockerfile def testGetShellCommands(self): - command = common.get_shell_commands("yum install nfs-utils") + command, _ = common.get_shell_commands("yum install nfs-utils") self.assertEqual(type(command), list) self.assertEqual(len(command), 1) self.assertEqual(command[0].options, self.command1.options) + # test on branching command + branching_script = "if [ -z $var ]; then yum install nfs-utils; fi" + branch_command, report = common.get_shell_commands(branching_script) + self.assertEqual(type(branch_command), list) + # we will ignore branching command, so len should be 0 + self.assertEqual(len(branch_command), 0) + # and the report should not be None + self.assertTrue(report) def testLoadFromCache(self): '''Given a layer object, populate the given layer in case the cache isn't empty'''
Tern has no concept of WORKDIR directory (required to collect go module info) **Describe the bug** Most golang Dockerfiles will have a `WORKDIR` where the go modules get installed, e.g. ``` # Start from the latest golang base image FROM golang:1.14-alpine as builder # Set the Current Working Directory inside the container WORKDIR /app # Copy go mod and sum files COPY go.mod go.sum ./ # Download all dependencies. Dependencies will be cached if the go.mod and go.sum files are not changed RUN go mod download # Copy the source from the current directory to the Working Directory inside the container COPY . . # Build the Go app RUN CGO_ENABLED=0 GOOS=linux go build -v -a -installsuffix cgo -o swag cmd/swag/main.go ``` If a `WORKDIR` is established, all subsequent `RUN`, `CMD`, `ADD`, `COPY`, or `ENTRYPOINT` commands will be executed in this `WORKDIR`. Tern currently has no concept of a `WORKDIR`. This is an issue when analyzing the go modules in a docker image, because `go list -m` only works when run from the `WORKDIR` where the modules live, unlike most package manager utilities, which are universally available in the image. The command `go list -m all | tail -n +2 | cut -d ' ' -f1`, which attempts to collect the module names, will return `Command failed. go list -m: not using modules` if not executed from the proper `WORKDIR`. However, if you change the `base.yml` command to cd into the `WORKDIR` first, the command executes without errors. cd-ing into the `WORKDIR` is not something we can do from `base.yml`, because we won't know the name of the `WORKDIR` directory and there is currently no way to reference it. **To Reproduce** Steps to reproduce the behavior: 1. Add the `go` package manager to base.yml and snippets.yml. 2. Run tern on a go image (similar to the Dockerfile above). 3. If you change `base.yml` to cd into the `WORKDIR` first by running `cd /app; /usr/local/go/bin/go list -m all | tail -n +2 | cut -d ' ' -f1` to collect go modules, the command will work and Tern will output the go modules in the report. If you only use `/usr/local/go/bin/go list -m all | tail -n +2 | cut -d ' ' -f1`, the command will fail. 4. See error/output below.
**Error in terminal** Output when you cd into the WORKDIR first in `base.yml`: ``` File licenses found in Layer: None Packages found in Layer: github.com/BurntSushi/toml-v0.3.1, github.com/KyleBanks/depth-v1.2.1, github.com/PuerkitoBio/purell-v1.1.1, github.com/PuerkitoBio/urlesc-v0.0.0-20170810143723-de5bf2ad4578, github.com/alecthomas/template-v0.0.0-20190718012654-fb15b899a751, github.com/cpuguy83/go-md2man/v2-v2.0.0-20190314233015-f79a8a8ca69d, github.com/davecgh/go-spew-v1.1.1, github.com/ghodss/yaml-v1.0.0, github.com/gin-contrib/gzip-v0.0.1, github.com/gin-contrib/sse-v0.1.0, github.com/gin-gonic/gin-v1.4.0, github.com/go-openapi/jsonpointer-v0.19.3, github.com/go-openapi/jsonreference-v0.19.3, github.com/go-openapi/spec-v0.19.4, github.com/go-openapi/swag-v0.19.5, github.com/golang/protobuf-v1.3.1, github.com/json-iterator/go-v1.1.6, github.com/kr/pretty-v0.1.0, github.com/kr/pty-v1.1.5, github.com/kr/text-v0.1.0, github.com/mailru/easyjson-v0.0.0-20190626092158-b2ccc519800e, github.com/mattn/go-isatty-v0.0.8, github.com/modern-go/concurrent-v0.0.0-20180306012644-bacd9c7ef1dd, github.com/modern-go/reflect2-v1.0.1, github.com/pkg/errors-v0.8.1, github.com/pmezard/go-difflib-v1.0.0, github.com/russross/blackfriday/v2-v2.0.1, github.com/satori/go.uuid-v1.2.0, github.com/shopspring/decimal-v1.2.0, github.com/shurcooL/sanitized_anchor_name-v1.0.0, github.com/stretchr/objx-v0.2.0, github.com/stretchr/testify-v1.4.0, github.com/swaggo/files-v0.0.0-20190704085106-630677cd5c14, github.com/swaggo/gin-swagger-v1.2.0, github.com/ugorji/go-v1.1.5-pre, github.com/ugorji/go/codec-v1.1.5-pre, github.com/urfave/cli-v1.20.0, github.com/urfave/cli/v2-v2.1.1, golang.org/x/crypto-v0.0.0-20190611184440-5c40567a22f8, golang.org/x/net-v0.0.0-20190827160401-ba9fcec4b297, golang.org/x/sync-v0.0.0-20190423024810-112230192c58, golang.org/x/sys-v0.0.0-20190616124812-15dcb6c0061f, golang.org/x/text-v0.3.2, golang.org/x/tools-v0.0.0-20190614205625-5aca471b1d59, gopkg.in/check.v1-v1.0.0-20180628173108-788fd7840127, gopkg.in/go-playground/assert.v1-v1.2.1, gopkg.in/go-playground/validator.v8-v8.18.2, gopkg.in/yaml.v2-v2.2.2 Licenses found in Layer: None ``` Output when you don't `cd` into the `WORKDIR`: ``` 2020-07-13 14:02:56,250 - DEBUG - rootfs - Running command: sudo unshare -pf --mount-proc=/home/rjudge/.tern/temp/mergedir/proc chroot /home/rjudge/.tern/temp/mergedir /bin/sh -c /usr/local/go/bin/go list -m all | tail -n +2 | cut -d ' ' -f1 2020-07-13 14:02:56,279 - ERROR - rootfs - Command failed. go list -m: not using modules ``` **Expected behavior** Tern should be executing from the `WORKDIR` directory, if it exists.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_analyze_common.py::TestAnalyzeCommon::testGetShellCommands" ]
[ "tests/test_analyze_common.py::TestAnalyzeCommon::testConsolidateCommandsWithDifferentCommands", "tests/test_analyze_common.py::TestAnalyzeCommon::testConsolidateCommandsWithSameCommands", "tests/test_analyze_common.py::TestAnalyzeCommon::testFilterInstallCommands", "tests/test_analyze_common.py::TestAnalyzeCommon::testGetInstalledPackageNamesWithInstallFlag", "tests/test_analyze_common.py::TestAnalyzeCommon::testGetInstalledPackageNamesWithRemoveFlag", "tests/test_analyze_common.py::TestAnalyzeCommon::testLoadFilesFromCache", "tests/test_analyze_common.py::TestAnalyzeCommon::testLoadFromCache", "tests/test_analyze_common.py::TestAnalyzeCommon::testLoadNoticesFromCache", "tests/test_analyze_common.py::TestAnalyzeCommon::testLoadPackagesFromCache", "tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveIgnoredCommandsWithIgnoreFlag", "tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveIgnoredCommandsWithoutIgnoreFlag", "tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveUnrecognizedCommandsWithFlag", "tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveUnrecognizedCommandsWithoutFlag", "tests/test_analyze_common.py::TestAnalyzeCommon::testSaveToCache", "tests/test_analyze_common.py::TestAnalyzeCommon::testUpdateMasterListWithPackages", "tests/test_analyze_common.py::TestAnalyzeCommon::testUpdateMasterListWithoutPackages" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-07-15T21:42:14Z"
bsd-2-clause
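A minimal, self-contained sketch of the WORKDIR-extraction idea from the tern patch above. The regex is copied from the patch's `get_layer_workdir`; the `created_by` history strings are hypothetical examples, not taken from a real image.

```python
import re


def get_layer_workdir(created_by):
    # Same regex as the patch: a WORKDIR instruction followed by an
    # absolute path made of /-separated word characters, e.g. /app.
    match = re.search(r"\bWORKDIR\ (\/\w+)+\b", created_by)
    if match:
        return match.group().split()[1]
    return None


# Hypothetical layer history strings, for illustration only.
print(get_layer_workdir("/bin/sh -c #(nop) WORKDIR /app"))  # /app
print(get_layer_workdir("/bin/sh -c go mod download"))       # None
```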
terrencepreilly__darglint-24
diff --git a/darglint/integrity_checker.py b/darglint/integrity_checker.py index 729993d..1d24c0f 100644 --- a/darglint/integrity_checker.py +++ b/darglint/integrity_checker.py @@ -184,7 +184,7 @@ class IntegrityChecker(object): fun_type = self.function.return_type doc_type = self.docstring.get_types(Sections.RETURNS_SECTION) if not doc_type or isinstance(doc_type, list): - doc_type = '' + doc_type = None if fun_type is not None and doc_type is not None: if fun_type != doc_type: line_numbers = self.docstring.get_line_numbers(
terrencepreilly/darglint
3e8a1d23cf5f9a007094b34a984d5041631d2906
diff --git a/tests/test_integrity_checker.py b/tests/test_integrity_checker.py index 53f6c8f..3d2b895 100644 --- a/tests/test_integrity_checker.py +++ b/tests/test_integrity_checker.py @@ -288,6 +288,40 @@ class IntegrityCheckerTestCase(TestCase): self.assertEqual(error.expected, 'int') self.assertEqual(error.actual, 'float') + def test_return_type_unchecked_if_not_defined_in_docstring(self): + program = '\n'.join([ + 'def foo() -> str:', + ' """Just a foobar.', + '', + ' Returns:', + ' bar', + '', + ' """', + ' return "bar"', + ]) + tree = ast.parse(program) + functions = get_function_descriptions(tree) + checker = IntegrityChecker() + checker.run_checks(functions[0]) + self.assertEqual(len(checker.errors), 0) + + def test_return_type_unchecked_if_not_defined_in_function(self): + program = '\n'.join([ + 'def foo():', + ' """Just a foobar.', + '', + ' Returns:', + ' str: bar', + '', + ' """', + ' return "bar"', + ]) + tree = ast.parse(program) + functions = get_function_descriptions(tree) + checker = IntegrityChecker() + checker.run_checks(functions[0]) + self.assertEqual(len(checker.errors), 0) + def test_return_type_checked_if_defined_in_docstring_and_function(self): program = '\n'.join([ 'def update_model(x: dict) -> dict:',
I203 raised for return type annotations. `darglint` raises an error (`I203 Return type mismatch: ~Return: expected str but was `) when a return type annotation is provided without a declared docstring return type. This is likely an oversight; the check should only fire if _both_ a type annotation and a docstring type declaration are present. Failure example: ``` def foo() -> str: """Just a standard foobar. Returns: Bar. """ return "bar" ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_return_type_unchecked_if_not_defined_in_docstring" ]
[ "tests/test_integrity_checker.py::IntegrityCheckerSphinxTestCase::test_missing_parameter", "tests/test_integrity_checker.py::IntegrityCheckerSphinxTestCase::test_variable_doesnt_exist", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_arg_types_checked_if_in_both_docstring_and_function", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_bare_noqa", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_excess_parameter_added", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_excess_yield_added_to_errors", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_extra_raises_added_to_error", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_global_noqa", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_global_noqa_works_for_syntax_errors", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_incorrect_syntax_raises_exception_optionally", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_missing_parameter_added", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_missing_raises_added_to_error", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_missing_return_parameter_added", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_missing_yield_added_to_errors", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_after_excess_raises", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_excess_return", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_excess_parameters", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_excess_yield", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_missing_parameters", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_missing_raises", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_missing_yield", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_parameter_type_mismatch", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_parameter_type_mismatch_by_name", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_return_type_mismatch", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_missing_return_parameter_added", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_return_type_checked_if_defined_in_docstring_and_function", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_return_type_unchecked_if_not_defined_in_function", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_skips_functions_without_docstrings", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_throws_assertion_if_no_colon_in_parameter_line", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_throws_assertion_if_no_content_after_colon", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_try_block_no_excess_error", "tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_yields_from_added_to_error" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2019-06-03T17:04:36Z"
mit
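Worth restating outside the diff: the darglint fix above hinges on the difference between "no documented type" and "empty-string type". A toy reproduction of the comparison logic (the function and argument names here are mine, not darglint's):

```python
def spurious_i203(fun_type, doc_type):
    # Old code normalized a missing docstring type to '' rather than
    # None, so the guard below still ran and 'str' != '' fired I203.
    if fun_type is not None and doc_type is not None:
        return fun_type != doc_type
    return False


print(spurious_i203('str', ''))     # True  -> the old false positive
print(spurious_i203('str', None))   # False -> behavior after the fix
print(spurious_i203('str', 'int'))  # True  -> a genuine mismatch
```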
terryyin__lizard-120
diff --git a/lizard.py b/lizard.py index 4d21e8a..cac8020 100755 --- a/lizard.py +++ b/lizard.py @@ -316,7 +316,7 @@ class NestingStack(object): self.pending_function = None self.nesting_stack.append(Namespace(token)) - def start_new_funciton_nesting(self, function): + def start_new_function_nesting(self, function): self.pending_function = function def _create_nesting(self): @@ -386,7 +386,7 @@ class FileInfoBuilder(object): self.fileinfo.filename, self.current_line) self.current_function.top_nesting_level = self.current_nesting_level - self.start_new_funciton_nesting(self.current_function) + self.start_new_function_nesting(self.current_function) def add_condition(self, inc=1): self.current_function.cyclomatic_complexity += inc diff --git a/lizard_ext/lizardns.py b/lizard_ext/lizardns.py index e057e73..4ee09bf 100644 --- a/lizard_ext/lizardns.py +++ b/lizard_ext/lizardns.py @@ -1,13 +1,16 @@ """ This extension counts nested control structures within a function. -The extension is implemented with C++ in mind. + +The extension is implemented with C++ and Python in mind, +but it is expected to work with other languages supported by Lizard +with its language reader implementing 'nesting_level' metric for tokens. The code borrows heavily from implementation of Nesting Depth extension originally written by Mehrdad Meh and Terry Yin. """ -from lizard import FileInfoBuilder, FunctionInfo -from lizard_ext.lizardnd import patch, patch_append_method +from lizard import FunctionInfo +from lizard_ext.lizardnd import patch_append_method DEFAULT_NS_THRESHOLD = 3 @@ -32,106 +35,90 @@ class LizardExtension(object): # pylint: disable=R0903 def __call__(self, tokens, reader): """The intent of the code is to detect control structures as entities. - The complexity arises from tracking of - control structures without brackets. - The termination of such control structures in C-like languages - is the next statement or control structure with a compound statement. - - Moreover, control structures with two or more tokens complicates - the proper counting, for example, 'else if'. + The implementation relies on nesting level metric for tokens + provided by language readers. + If the following contract for the nesting level metric does not hold, + this implementation of nested structure counting is invalid. - In Python with meaningful indentation, - tracking the indentation levels becomes crucial - to identify boundaries of the structures. - The following code is not designed for Python. - """ - structures = set(['if', 'else', 'foreach', 'for', 'while', 'do', - 'try', 'catch', 'switch']) + If a control structure has started its block (eg. '{'), + and its level is **less** than the next structure, + the next structure is nested. - structure_indicator = "{" - structure_end = "}" - indent_indicator = ";" - - for token in tokens: - if reader.context.is_within_structure(): - if token == "(": - reader.context.add_parentheses(1) - elif token == ")": - reader.context.add_parentheses(-1) + If a control structure has *not* started its block, + and its level is **no more** than the next structure, + the next structure is nested (compound statement). - if not reader.context.is_within_parentheses(): - if token in structures: - reader.context.add_nested_structure(token) + If a control structure level is **higher** than the next structure, + it is considered closed. 
- elif token == structure_indicator: - reader.context.add_brace() - - elif token == structure_end: - reader.context.pop_brace() - reader.context.pop_nested_structure() - - elif token == indent_indicator: - reader.context.pop_nested_structure() - - yield token + If a control structure has started its block, + and its level is **equal** to the next structure, + it is considered closed. - -# TODO: Some weird false positive from pylint. # pylint: disable=fixme -# pylint: disable=E1101 -class NSFileInfoAddition(FileInfoBuilder): - - def add_nested_structure(self, token): - """Conditionally adds nested structures.""" - # Handle compound else-if. - if token == "if" and self.current_function.structure_stack: - prev_token, br_state = self.current_function.structure_stack[-1] - if (prev_token == "else" and - br_state == self.current_function.brace_count): + The level of any non-structure tokens is treated + with the same logic as for the next structures + for control block **starting** and **closing** purposes. + """ + # TODO: Delegate this to language readers # pylint: disable=fixme + structures = set(['if', 'else', 'elif', 'for', 'foreach', 'while', 'do', + 'try', 'catch', 'switch', 'finally', 'except', + 'with']) + + cur_level = 0 + start_structure = [False] # Just to make it mutable. + structure_stack = [] # [(token, ns_level)] + + def add_nested_structure(token): + """Conditionally adds nested structures.""" + if structure_stack: + prev_token, ns_level = structure_stack[-1] + if cur_level == ns_level: + if (token == "if" and prev_token == "else" and + not start_structure[0]): + return # Compound 'else if' in C-like languages. + if start_structure[0]: + structure_stack.pop() + elif cur_level < ns_level: + while structure_stack and ns_level >= cur_level: + _, ns_level = structure_stack.pop() + + structure_stack.append((token, cur_level)) + start_structure[0] = False # Starts on the next level with body. + + ns_cur = len(structure_stack) + if reader.context.current_function.max_nested_structures < ns_cur: + reader.context.current_function.max_nested_structures = ns_cur + + def pop_nested_structure(): + """Conditionally pops the nested structures if levels match.""" + if not structure_stack: return - self.current_function.structure_stack.append( - (token, self.current_function.brace_count)) - - ns_cur = len(self.current_function.structure_stack) - if self.current_function.max_nested_structures < ns_cur: - self.current_function.max_nested_structures = ns_cur + _, ns_level = structure_stack[-1] - def pop_nested_structure(self): - """Conditionally pops the structure count if braces match.""" - if not self.current_function.structure_stack: - return + if cur_level > ns_level: + start_structure[0] = True - _, br_state = self.current_function.structure_stack[-1] - if br_state == self.current_function.brace_count: - self.current_function.structure_stack.pop() + elif cur_level < ns_level: + while structure_stack and ns_level >= cur_level: + _, ns_level = structure_stack.pop() + start_structure[0] = bool(structure_stack) - def add_brace(self): - self.current_function.brace_count += 1 + elif start_structure[0]: + structure_stack.pop() - def pop_brace(self): - # pylint: disable=fixme - # TODO: For some reason, brace count goes negative. 
- # assert self.current_function.brace_count > 0 - self.current_function.brace_count -= 1 - - def add_parentheses(self, inc): - """Dual purpose parentheses manipulator.""" - self.current_function.paren_count += inc - - def is_within_parentheses(self): - assert self.current_function.paren_count >= 0 - return self.current_function.paren_count != 0 + for token in tokens: + cur_level = reader.context.current_nesting_level + if token in structures: + add_nested_structure(token) + else: + pop_nested_structure() - def is_within_structure(self): - return bool(self.current_function.structure_stack) + yield token def _init_nested_structure_data(self, *_): self.max_nested_structures = 0 - self.brace_count = 0 - self.paren_count = 0 - self.structure_stack = [] -patch(NSFileInfoAddition, FileInfoBuilder) patch_append_method(_init_nested_structure_data, FunctionInfo, "__init__")
terryyin/lizard
bdcc784bd22d8e48db22884dfeb42647ffb67fbf
diff --git a/test/testNestedStructures.py b/test/testNestedStructures.py old mode 100755 new mode 100644 index 7eee514..1a2d826 --- a/test/testNestedStructures.py +++ b/test/testNestedStructures.py @@ -1,5 +1,7 @@ import unittest -from .testHelpers import get_cpp_function_list_with_extnesion + +from .testHelpers import get_cpp_function_list_with_extnesion, \ + get_python_function_list_with_extnesion from lizard_ext.lizardns import LizardExtension as NestedStructure @@ -7,6 +9,10 @@ def process_cpp(source): return get_cpp_function_list_with_extnesion(source, NestedStructure()) +def process_python(source): + return get_python_function_list_with_extnesion(source, NestedStructure()) + + class TestCppNestedStructures(unittest.TestCase): def test_no_structures(self): @@ -209,3 +215,122 @@ class TestCppNestedStructures(unittest.TestCase): } """) self.assertEqual(3, result[0].max_nested_structures) + + +class TestPythonNestedStructures(unittest.TestCase): + + def test_no_structures(self): + result = process_python("def fun():\n pass") + self.assertEqual(0, result[0].max_nested_structures) + + def test_if_structure(self): + result = process_python("def fun():\n if a:\n return") + self.assertEqual(1, result[0].max_nested_structures) + + def test_for_structure(self): + result = process_python("def fun():\n for a in b:\n foo()") + self.assertEqual(1, result[0].max_nested_structures) + + def test_condition_in_if_structure(self): + result = process_python("def fun():\n if a and b:\n return") + self.assertEqual(1, result[0].max_nested_structures) + + def test_elif(self): + result = process_python(""" + def c(): + if a: + baz() + elif c: + foo() + """) + self.assertEqual(1, result[0].max_nested_structures) + + def test_nested_if_structures(self): + result = process_python(""" + def c(): + if a: + if b: + baz() + else: + foo() + """) + self.assertEqual(2, result[0].max_nested_structures) + + def test_equal_metric_structures(self): + result = process_python(""" + def c(): + if a: + if b: + baz() + else: + foo() + + for a in b: + if c: + bar() + """) + self.assertEqual(2, result[0].max_nested_structures) + + def test_while(self): + result = process_python(""" + def c(): + while a: + baz() + """) + self.assertEqual(1, result[0].max_nested_structures) + + def test_try_catch(self): + result = process_python(""" + def c(): + try: + f.open() + catch Exception as err: + print(err) + finally: + f.close() + """) + self.assertEqual(1, result[0].max_nested_structures) + + def test_two_functions(self): + result = process_python(""" + def c(): + try: + if a: + foo() + catch Exception as err: + print(err) + + def d(): + for a in b: + for x in y: + if i: + return j + """) + self.assertEqual(2, result[0].max_nested_structures) + self.assertEqual(3, result[1].max_nested_structures) + + def test_nested_functions(self): + result = process_python(""" + def c(): + def d(): + for a in b: + for x in y: + if i: + return j + try: + if a: + foo() + catch Exception as err: + print(err) + + """) + self.assertEqual(3, result[0].max_nested_structures) + self.assertEqual(2, result[1].max_nested_structures) + + def test_with_structure(self): + result = process_python(""" + def c(): + with open(f) as input_file: + foo(f) + """) + self.assertEqual(1, result[0].max_nested_structures)
Detection of Deeply Nested Control Structures This metric may not apply to the whole function, but the maximum 'nestedness' (nested for-loops, if-statements, etc.) may be an interesting metric for detecting code smell. It closely relates to indentation. Got this from the Linux kernel coding style: >The answer to that is that if you need more than 3 levels of indentation, you're screwed anyway, and should fix your program.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/testNestedStructures.py::TestPythonNestedStructures::test_equal_metric_structures", "test/testNestedStructures.py::TestPythonNestedStructures::test_nested_functions", "test/testNestedStructures.py::TestPythonNestedStructures::test_nested_if_structures", "test/testNestedStructures.py::TestPythonNestedStructures::test_try_catch", "test/testNestedStructures.py::TestPythonNestedStructures::test_two_functions", "test/testNestedStructures.py::TestPythonNestedStructures::test_with_structure" ]
[ "test/testNestedStructures.py::TestCppNestedStructures::test_and_condition_in_if_structure", "test/testNestedStructures.py::TestCppNestedStructures::test_do", "test/testNestedStructures.py::TestCppNestedStructures::test_forever_loop", "test/testNestedStructures.py::TestCppNestedStructures::test_if_structure", "test/testNestedStructures.py::TestCppNestedStructures::test_nested_if_structures", "test/testNestedStructures.py::TestCppNestedStructures::test_nested_loop_mixed_brackets", "test/testNestedStructures.py::TestCppNestedStructures::test_no_structures", "test/testNestedStructures.py::TestCppNestedStructures::test_non_r_value_ref_in_body", "test/testNestedStructures.py::TestCppNestedStructures::test_scope", "test/testNestedStructures.py::TestCppNestedStructures::test_switch_case", "test/testNestedStructures.py::TestCppNestedStructures::test_terminator_in_parentheses", "test/testNestedStructures.py::TestCppNestedStructures::test_ternary_operator", "test/testNestedStructures.py::TestCppNestedStructures::test_try_catch", "test/testNestedStructures.py::TestCppNestedStructures::test_while", "test/testNestedStructures.py::TestPythonNestedStructures::test_condition_in_if_structure", "test/testNestedStructures.py::TestPythonNestedStructures::test_elif", "test/testNestedStructures.py::TestPythonNestedStructures::test_for_structure", "test/testNestedStructures.py::TestPythonNestedStructures::test_if_structure", "test/testNestedStructures.py::TestPythonNestedStructures::test_no_structures", "test/testNestedStructures.py::TestPythonNestedStructures::test_while" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2016-05-08T06:41:31Z"
mit
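The lizardns rewrite above replaces brace counting with a nesting-level contract supplied by the language reader. As a rough illustration of the counting idea, here is an indentation-based simplification for Python-like source; the real extension works on tokens via `reader.context.current_nesting_level` and also handles compound `else if` and brace languages, which this sketch deliberately ignores.

```python
STRUCTURES = {'if', 'elif', 'else', 'for', 'while', 'try', 'except', 'with'}


def max_nested_structures(lines):
    stack, deepest = [], 0
    for line in lines:
        stripped = line.lstrip()
        if not stripped:
            continue
        indent = len(line) - len(stripped)
        # A line at an indent no deeper than a recorded structure
        # closes that structure (and everything nested inside it).
        while stack and stack[-1] >= indent:
            stack.pop()
        if stripped.split()[0].rstrip(':') in STRUCTURES:
            stack.append(indent)
            deepest = max(deepest, len(stack))
    return deepest


src = '''
def f(a, xs, b):
    if a:
        for x in xs:
            if b:
                return x
'''.splitlines()
print(max_nested_structures(src))  # 3
```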
terryyin__lizard-144
diff --git a/lizard_languages/clike.py b/lizard_languages/clike.py index 1134e96..a17fb03 100644 --- a/lizard_languages/clike.py +++ b/lizard_languages/clike.py @@ -235,7 +235,7 @@ class CLikeStates(CodeStateMachine): self.context.add_to_long_function_name(token) def _state_dec_to_imp(self, token): - if token == 'const' or token == 'noexcept': + if token in ('const', 'noexcept', '&', '&&'): self.context.add_to_long_function_name(" " + token) elif token == 'throw': self._state = self._state_throw
terryyin/lizard
1933addc0f0d4febb8b2273048f81556c0062d61
diff --git a/test/testCyclomaticComplexity.py b/test/testCyclomaticComplexity.py index 346117e..d6efefa 100644 --- a/test/testCyclomaticComplexity.py +++ b/test/testCyclomaticComplexity.py @@ -79,3 +79,13 @@ class TestCppCyclomaticComplexity(unittest.TestCase): """) self.assertEqual(4, result[0].cyclomatic_complexity) + def test_ref_qualifiers(self): + """C++11 rvalue ref qualifiers look like AND operator.""" + result = get_cpp_function_list( + "struct A { void foo() && { return bar() && baz(); } };") + self.assertEqual(1, len(result)) + self.assertEqual(2, result[0].cyclomatic_complexity) + result = get_cpp_function_list( + "struct A { void foo() const && { return bar() && baz(); } };") + self.assertEqual(1, len(result)) + self.assertEqual(2, result[0].cyclomatic_complexity) diff --git a/test/test_languages/testCAndCPP.py b/test/test_languages/testCAndCPP.py index 0928b15..b175fcd 100644 --- a/test/test_languages/testCAndCPP.py +++ b/test/test_languages/testCAndCPP.py @@ -423,6 +423,7 @@ class Test_c_cpp_lizard(unittest.TestCase): result = get_cpp_function_list('''int fun(struct a){}''') self.assertEqual(1, len(result)) + def test_trailing_return_type(self): """C++11 trailing return type for functions.""" result = get_cpp_function_list("auto foo() -> void {}") @@ -432,6 +433,21 @@ class Test_c_cpp_lizard(unittest.TestCase): self.assertEqual(1, len(result)) self.assertEqual("foo", result[0].name) + def test_ref_qualifiers(self): + """C++11 ref qualifiers for member functions.""" + result = get_cpp_function_list("struct A { void foo() & {} };") + self.assertEqual(1, len(result)) + self.assertEqual("A::foo", result[0].name) + result = get_cpp_function_list("struct A { void foo() const & {} };") + self.assertEqual(1, len(result)) + self.assertEqual("A::foo", result[0].name) + result = get_cpp_function_list("struct A { void foo() && {} };") + self.assertEqual(1, len(result)) + self.assertEqual("A::foo", result[0].name) + result = get_cpp_function_list("struct A { void foo() const && {} };") + self.assertEqual(1, len(result)) + self.assertEqual("A::foo", result[0].name) + class Test_Preprocessing(unittest.TestCase):
Bug: C++11 ref-qualified functions Lizard misses C++11 ref-qualified member functions. These functions don't appear in the report or the result database. ```cpp struct A { void foo() & {}; void foo() const & {}; void foo() && {}; void foo() const && {}; }; ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_ref_qualifiers", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_ref_qualifiers" ]
[ "test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_and", "test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_else_if", "test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_forever_loop", "test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_no_condition", "test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_non_r_value_ref_in_body", "test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_one_condition", "test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_question_mark", "test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_r_value_ref_in_body", "test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_r_value_ref_in_parameter", "test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_statement_no_curly_brackets", "test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_typedef", "test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_sharp_if_and_sharp_elif_counts_in_cc_number", "test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_two_function_with_non_r_value_ref_in_body", "test/test_languages/testCAndCPP.py::Test_C_Token_extension::test_connecting_marcro", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_1", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_braket_that_is_not_a_namespace", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_class_with_inheritance", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_complicated_c_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_initialization_list", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_initialization_list_noexcept", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_initializer_list", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_uniform_initialization", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_destructor_implementation", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_double_nested_template", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_double_slash_within_string", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_empty", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_dec_followed_with_one_word_is_ok", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_dec_with_noexcept", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_dec_with_throw", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_declaration_is_not_counted", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_name_class", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_operator", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_that_returns_function_pointers", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_1_param", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_content", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_no_param", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_param", 
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_strang_param", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_strang_param2", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_global_var_constructor", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_inline_operator", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_less_then_is_not_template", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_namespace_alias", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_class", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_class_middle", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_template", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_template_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_unnamed_namespace", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_no_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_non_function_initializer_list", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_non_function_uniform_initialization", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_not_old_style_c_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_old_style_c_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_old_style_c_function_has_semicolon", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_in_class", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_const", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_namespace", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_noexcept", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_macro_in_class", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_only_word_can_be_function_name", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_overloading", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_overloading_shift", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_overloading_with_namespace", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_with_complicated_name", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_parentheses_before_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_pre_class", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_struct_in_param", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_struct_in_return_type", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_stupid_macro_before_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_as_part_of_function_name", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_as_reference", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_class", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_class_full_specialization", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_class_partial_specialization", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_function_specialization", 
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_with_pointer", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_with_reference", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_with_reference_as_reference", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_trailing_return_type", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_two_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_two_simplest_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_typedef_is_not_old_style_c_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_underscore", "test/test_languages/testCAndCPP.py::Test_Preprocessing::test_content_macro_should_be_ignored", "test/test_languages/testCAndCPP.py::Test_Preprocessing::test_preprocessor_is_not_function", "test/test_languages/testCAndCPP.py::Test_Preprocessing::test_preprocessors_should_be_ignored_outside_function_implementation", "test/test_languages/testCAndCPP.py::Test_Big::test_trouble" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2016-08-03T13:54:59Z"
mit
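The effect of the two-token change above (accepting `&` and `&&` as ref-qualifiers in the declaration-to-implementation state) can be checked from lizard's public `analyze_file.analyze_source_code` entry point; expected values below are taken from this record's tests and assume a lizard build with the fix applied.

```python
import lizard  # pip install lizard

code = "struct A { void foo() const && { return bar() && baz(); } };"
info = lizard.analyze_file.analyze_source_code("a.cpp", code)
fn = info.function_list[0]
print(fn.name)                   # A::foo  (the function is now detected)
print(fn.cyclomatic_complexity)  # 2: only the real '&&' in the body counts
```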
terryyin__lizard-174
diff --git a/lizard_ext/lizardns.py b/lizard_ext/lizardns.py index fe24dfc..3330550 100644 --- a/lizard_ext/lizardns.py +++ b/lizard_ext/lizardns.py @@ -39,7 +39,7 @@ class LizardExtension(object): # pylint: disable=R0903 If the following contract for the nesting level metric does not hold, this implementation of nested structure counting is invalid. - If a control structure has started its block (eg. '{'), + If a control structure has started its block (e.g., '{'), and its level is **less** than the next structure, the next structure is nested. @@ -107,14 +107,13 @@ class LizardExtension(object): # pylint: disable=R0903 structure_stack.pop() for token in tokens: + yield token cur_level = reader.context.current_nesting_level if token in structures: add_nested_structure(token) else: pop_nested_structure() - yield token - def _init_nested_structure_data(self, *_): self.max_nested_structures = 0 diff --git a/lizard_languages/clike.py b/lizard_languages/clike.py index 20b683f..bf97d97 100644 --- a/lizard_languages/clike.py +++ b/lizard_languages/clike.py @@ -88,50 +88,69 @@ class CLikeNestingStackStates(CodeStateMachine): The handling of these complex cases is unspecified and can be ignored. """ - # Beasts that can be defined within one line without braces. - __braceless_structures = set(['if', 'else', 'for', 'while', 'do', - 'switch']) - __paren_count = 0 # Used only to tackle the beasts. - __braceless = None # Applies only to the beasts. - __structure_brace_stack = [] # Boolean stack for structures' brace states. - - def __pop_braceless_structures(self): - """Pops structures up to the one with braces.""" + __structures = set(["if", "else", "for", "while", "do", "switch", + "try", "catch"]) + # Structures paired on the same nesting level. + __paired_structures = {"if": "else", "try": "catch", "catch": "catch", + "do": "while"} + __wait_for_pair = False # Wait for the pair structure to close the level. + __structure_brace_stack = [] # Structure and brace states. 
+ + def __pop_without_pair(self): + """Continue poping nesting levels without the pair.""" + self.__wait_for_pair = False + while (self.__structure_brace_stack and + self.__structure_brace_stack[-1]): + structure = self.__structure_brace_stack.pop() + self.context.pop_nesting() + if structure in self.__paired_structures: + self.__wait_for_pair = self.__paired_structures[structure] + return + + def __pop_structures(self): + """Pops structures up to the one with braces or a waiting pair.""" self.context.pop_nesting() - is_structure = None + structure = None if self.__structure_brace_stack: - is_structure = self.__structure_brace_stack.pop() + structure = self.__structure_brace_stack.pop() - while (is_structure is not None and self.__structure_brace_stack and - self.__structure_brace_stack[-1]): - self.__structure_brace_stack.pop() - self.context.pop_nesting() + if structure is None: + return + if structure in self.__paired_structures: + self.__wait_for_pair = self.__paired_structures[structure] + return + self.__pop_without_pair() def __else_if_structure(self, token): """Handles possible compound 'else if' after 'else' token.""" self._state = self.__declare_structure - if token != "if": + if token == "if": + self.__structure_brace_stack[-1] = "if" + else: self._state(token) + @CodeStateMachine.read_inside_brackets_then("()") def __declare_structure(self, token): """Ignores structures between parentheses on structure declaration.""" - if token == "(": - self.__paren_count += 1 - elif token == ")": - # assert self.__paren_count > 0 - self.__paren_count -= 1 - elif self.__paren_count == 0: - self._state = self._state_global - if token == "{": - self.__braceless = False - else: - self.__braceless = True - self.context.add_bare_nesting() - self.__structure_brace_stack.append(True) + self.context.add_bare_nesting() + self._state = self._state_structure + if token != ")": + self._state(token) + + def _state_structure(self, token): + """Control-flow structure states right before the body.""" + self._state = self._state_global + if token == "{": + self.context.add_bare_nesting() + self.__structure_brace_stack.append(False) + else: self._state(token) def _state_global(self, token): """Dual-purpose state for global and structure bodies.""" + while self.__wait_for_pair and token != self.__wait_for_pair: + self.__pop_without_pair() + if token == "template": self._state = self._template_declaration @@ -140,16 +159,15 @@ class CLikeNestingStackStates(CodeStateMachine): elif token == "{": self.context.add_bare_nesting() - self.__structure_brace_stack.append(self.__braceless) - self.__braceless = None + self.__structure_brace_stack.append(None) # Non-structure braces. elif token == '}' or (token == ";" and self.__structure_brace_stack and self.__structure_brace_stack[-1]): - self.__braceless = None - self.__pop_braceless_structures() + self.__pop_structures() - elif token in self.__braceless_structures: - # assert self.__paren_count == 0 + elif token in self.__structures: + self.__wait_for_pair = False + self.__structure_brace_stack.append(token) if token == "else": self._state = self.__else_if_structure else:
terryyin/lizard
71478c51b2d16688efd489ae41e16f21c89df0ca
diff --git a/test/testNestedStructures.py b/test/testNestedStructures.py index 2f77547..5e565fb 100644 --- a/test/testNestedStructures.py +++ b/test/testNestedStructures.py @@ -175,6 +175,32 @@ class TestCppNestedStructures(unittest.TestCase): self.assertEqual(2, result[0].max_nested_structures) self.assertEqual(2, result[1].max_nested_structures) + def test_braceless_nested_if_try_structures(self): + result = process_cpp(""" + x c() { + if (a) + try { + throw 42; + } catch(...) { + if (b) return 42; + } + } + """) + self.assertEqual(3, result[0].max_nested_structures) + + def test_braceless_nested_for_try_structures(self): + result = process_cpp(""" + x c() { + for (;;) + try { + throw 42; + } catch(...) { + if (b) return 42; + } + } + """) + self.assertEqual(3, result[0].max_nested_structures) + def test_switch_case(self): """Switch-Case is one control structure.""" result = process_cpp(""" @@ -228,6 +254,74 @@ class TestCppNestedStructures(unittest.TestCase): """) self.assertEqual(3, result[0].max_nested_structures) + def test_braceless_consecutive_if_structures(self): + """Braceless structures one after another.""" + result = process_cpp(""" + x c() { + if (a) + if (b) + foobar(); + if (c) + if (d) + baz(); + } + """) + self.assertEqual(2, result[0].max_nested_structures) + + def test_braceless_consecutive_for_if_structures(self): + """Braceless structures one after another.""" + result = process_cpp(""" + x c() { + for (;;) + for (;;) + foobar(); + if (c) + if (d) + baz(); + } + """) + self.assertEqual(2, result[0].max_nested_structures) + + def test_braceless_consecutive_if_structures_with_return(self): + """Braceless structures one after another.""" + result = process_cpp(""" + x c() { + if (a) + if (b) + return true; + if (c) + if (d) + return false; + } + """) + self.assertEqual(2, result[0].max_nested_structures) + + def test_braceless_nested_if_else_structures(self): + result = process_cpp(""" + x c() { + if (a) + if (b) { + return b; + } else { + if (b) return 42; + } + } + """) + self.assertEqual(3, result[0].max_nested_structures) + + def test_braceless_nested_if_else_if_structures(self): + result = process_cpp(""" + x c() { + if (a) + if (b) { + return b; + } else if (c) { + if (b) return 42; + } + } + """) + self.assertEqual(3, result[0].max_nested_structures) + @unittest.skip("Unspecified. Not Implemented. Convoluted.") def test_struct_inside_declaration(self): """Extra complexity class/struct should be ignored."""
a non-structural failure case in NS metric Hi, I am currently running the NS metric on my repository and hit a bug involving return statements. The following unit test clarifies my claim: ``` def test_non_structure_braces_with_return(self): """return statements in non-structural nesting level may confuse the nesting level.""" result = process_cpp(""" x c() { if (a) if (b) return false; if (c) if (d) return false; } """) self.assertEqual(3, result[0].max_nested_structures) # should be 2 ``` commit: 23ec9e8e0091bf24a13d30b72fbe4df5b77b971a Looking forward to hearing from you regarding this issue.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/testNestedStructures.py::TestCppNestedStructures::test_braceless_consecutive_for_if_structures", "test/testNestedStructures.py::TestCppNestedStructures::test_braceless_consecutive_if_structures", "test/testNestedStructures.py::TestCppNestedStructures::test_braceless_consecutive_if_structures_with_return", "test/testNestedStructures.py::TestCppNestedStructures::test_braceless_nested_for_try_structures", "test/testNestedStructures.py::TestCppNestedStructures::test_braceless_nested_if_try_structures" ]
[ "test/testNestedStructures.py::TestCppNestedStructures::test_and_condition_in_if_structure", "test/testNestedStructures.py::TestCppNestedStructures::test_braceless_nested_if_else_if_structures", "test/testNestedStructures.py::TestCppNestedStructures::test_braceless_nested_if_else_structures", "test/testNestedStructures.py::TestCppNestedStructures::test_do", "test/testNestedStructures.py::TestCppNestedStructures::test_else_if", "test/testNestedStructures.py::TestCppNestedStructures::test_equal_metric_structures", "test/testNestedStructures.py::TestCppNestedStructures::test_forever_loop", "test/testNestedStructures.py::TestCppNestedStructures::test_gotcha_if_else", "test/testNestedStructures.py::TestCppNestedStructures::test_if_structure", "test/testNestedStructures.py::TestCppNestedStructures::test_nested_if_structures", "test/testNestedStructures.py::TestCppNestedStructures::test_nested_loop_mixed_brackets", "test/testNestedStructures.py::TestCppNestedStructures::test_no_structures", "test/testNestedStructures.py::TestCppNestedStructures::test_non_r_value_ref_in_body", "test/testNestedStructures.py::TestCppNestedStructures::test_non_structure_braces", "test/testNestedStructures.py::TestCppNestedStructures::test_scope", "test/testNestedStructures.py::TestCppNestedStructures::test_switch_case", "test/testNestedStructures.py::TestCppNestedStructures::test_terminator_in_parentheses", "test/testNestedStructures.py::TestCppNestedStructures::test_ternary_operator", "test/testNestedStructures.py::TestCppNestedStructures::test_try_catch", "test/testNestedStructures.py::TestCppNestedStructures::test_while", "test/testNestedStructures.py::TestPythonNestedStructures::test_condition_in_if_structure", "test/testNestedStructures.py::TestPythonNestedStructures::test_elif", "test/testNestedStructures.py::TestPythonNestedStructures::test_equal_metric_structures", "test/testNestedStructures.py::TestPythonNestedStructures::test_for_else", "test/testNestedStructures.py::TestPythonNestedStructures::test_for_structure", "test/testNestedStructures.py::TestPythonNestedStructures::test_if_structure", "test/testNestedStructures.py::TestPythonNestedStructures::test_nested_functions", "test/testNestedStructures.py::TestPythonNestedStructures::test_nested_if_structures", "test/testNestedStructures.py::TestPythonNestedStructures::test_no_structures", "test/testNestedStructures.py::TestPythonNestedStructures::test_try_catch", "test/testNestedStructures.py::TestPythonNestedStructures::test_two_functions", "test/testNestedStructures.py::TestPythonNestedStructures::test_while", "test/testNestedStructures.py::TestPythonNestedStructures::test_while_else", "test/testNestedStructures.py::TestPythonNestedStructures::test_with_structure" ]
{ "failed_lite_validators": [ "has_git_commit_hash", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2016-12-30T21:41:57Z"
mit
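A small restatement of the pairing rule the rewrite above relies on: certain structures share a nesting level with a follow-up keyword, so popping must sometimes wait. This mirrors the patch's `__paired_structures` table; the helper name is mine, for illustration only.

```python
PAIRED = {"if": "else", "try": "catch", "catch": "catch", "do": "while"}


def closes_level(structure, next_token):
    # A just-popped structure keeps its nesting level open only if the
    # next token is its designated pair (else after if, catch after
    # try/catch, while after do).
    return PAIRED.get(structure) != next_token


print(closes_level("if", "else"))    # False: 'else' reuses the level
print(closes_level("do", "while"))   # False
print(closes_level("if", "return"))  # True: the level is fully closed
```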
terryyin__lizard-191
diff --git a/lizard_languages/clike.py b/lizard_languages/clike.py index 33e1c3d..2c1af01 100644 --- a/lizard_languages/clike.py +++ b/lizard_languages/clike.py @@ -155,7 +155,7 @@ class CLikeNestingStackStates(CodeStateMachine): if token == "template": self._state = self._template_declaration - elif token in ("struct", "class", "namespace"): + elif token in ("struct", "class", "namespace", "union"): self._state = self._read_namespace elif token == "{":
terryyin/lizard
48de756b52b92705f2127353b54d5a4ddac71187
diff --git a/test/test_languages/testCAndCPP.py b/test/test_languages/testCAndCPP.py index cd5569b..41a1b13 100644 --- a/test/test_languages/testCAndCPP.py +++ b/test/test_languages/testCAndCPP.py @@ -461,6 +461,11 @@ class Test_c_cpp_lizard(unittest.TestCase): self.assertEqual(1, len(result)) self.assertEqual("A::foo", result[0].name) + def test_union_as_qualifier(self): + """Union as namespace for functions.""" + result = get_cpp_function_list("union A { void foo() {} };") + self.assertEqual(1, len(result)) + self.assertEqual("A::foo", result[0].name) class Test_cpp11_Attributes(unittest.TestCase): """C++11 extendable attributes can appear pretty much anywhere."""
Lizard not handling functions within unions correctly For the following code: ```c++ namespace foo{ void myFunction() { } union bar{ void mySecondFunction() { } }; class dog{ void bark() { } }; }; ``` Lizard generates the following output: ![doxunionbug](https://user-images.githubusercontent.com/29785071/27710254-b019601a-5ce4-11e7-9706-da253a28276e.png) This shows that the long_name generated by lizard includes an enclosing namespace or class in a function's qualified name, but omits an enclosing union. This inconsistency can cause issues for code analysis tools.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_union_as_qualifier" ]
[ "test/test_languages/testCAndCPP.py::Test_C_Token_extension::test_connecting_marcro", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_1", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_braket_that_is_not_a_namespace", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_class_with_inheritance", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_complicated_c_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_initialization_list", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_initialization_list_noexcept", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_initializer_list", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_uniform_initialization", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_destructor_implementation", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_double_nested_template", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_double_slash_within_string", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_empty", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_dec_followed_with_one_word_is_ok", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_dec_with_noexcept", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_dec_with_throw", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_declaration_is_not_counted", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_name_class", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_operator", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_that_returns_function_pointers", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_1_param", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_content", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_no_param", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_param", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_strang_param", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_strang_param2", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_global_var_constructor", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_inline_operator", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_less_then_is_not_template", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_namespace_alias", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_class", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_class_middle", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_template", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_template_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_unnamed_namespace", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_no_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_non_function_initializer_list", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_non_function_uniform_initialization", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_not_old_style_c_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_old_style_c_function", 
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_old_style_c_function_has_semicolon", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_in_class", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_const", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_namespace", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_noexcept", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_throw", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_macro_in_class", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_only_word_can_be_function_name", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_overloading", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_overloading_shift", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_overloading_with_namespace", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_with_complicated_name", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_parentheses_before_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_pre_class", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_ref_qualifiers", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_struct_in_param", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_struct_in_return_type", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_stupid_macro_before_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_as_part_of_function_name", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_as_reference", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_class", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_class_full_specialization", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_class_partial_specialization", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_function_specialization", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_with_pointer", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_with_reference", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_with_reference_as_reference", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_trailing_return_type", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_two_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_two_simplest_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_typedef_is_not_old_style_c_function", "test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_underscore", "test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_class", "test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_control_structures", "test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_function", "test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_function_parameters", "test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_function_return_type", "test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_namespace", 
"test/test_languages/testCAndCPP.py::Test_Preprocessing::test_content_macro_should_be_ignored", "test/test_languages/testCAndCPP.py::Test_Preprocessing::test_preprocessor_is_not_function", "test/test_languages/testCAndCPP.py::Test_Preprocessing::test_preprocessors_should_be_ignored_outside_function_implementation", "test/test_languages/testCAndCPP.py::Test_Big::test_trouble", "test/test_languages/testCAndCPP.py::Test_Dialects::test_cuda_kernel_launch" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media" ], "has_test_patch": true, "is_lite": false }
"2017-06-29T21:53:30Z"
mit
terryyin__lizard-328
diff --git a/lizard_languages/__init__.py b/lizard_languages/__init__.py index 94b9052..785a09d 100644 --- a/lizard_languages/__init__.py +++ b/lizard_languages/__init__.py @@ -3,6 +3,7 @@ from .clike import CLikeReader from .java import JavaReader from .javascript import JavaScriptReader +from .kotlin import KotlinReader from .python import PythonReader from .objc import ObjCReader from .ttcn import TTCNReader @@ -37,7 +38,8 @@ def languages(): LuaReader, RustReader, TypeScriptReader, - FortranReader + FortranReader, + KotlinReader ] diff --git a/lizard_languages/kotlin.py b/lizard_languages/kotlin.py new file mode 100644 index 0000000..f5c7b1b --- /dev/null +++ b/lizard_languages/kotlin.py @@ -0,0 +1,92 @@ +''' +Language parser for Apple Swift +''' + +from .clike import CCppCommentsMixin +from .code_reader import CodeReader, CodeStateMachine +from .golike import GoLikeStates +from .swift import SwiftReplaceLabel + + +class KotlinReader(CodeReader, CCppCommentsMixin, SwiftReplaceLabel): + # pylint: disable=R0903 + + ext = ['kt', 'kts'] + language_names = ['kotlin'] + _conditions = { + 'if', 'for', 'while', 'catch', '&&', '||', '?:' + } + + def __init__(self, context): + super(KotlinReader, self).__init__(context) + self.parallel_states = [KotlinStates(context)] + + @staticmethod + def generate_tokens(source_code, addition='', token_class=None): + return CodeReader.generate_tokens( + source_code, + r"|`\w+`" + + r"|\w+\?" + + r"|\w+\!!" + + r"|\?\?" + + r"|\?:" + + addition + ) + + +class KotlinStates(GoLikeStates): # pylint: disable=R0903 + + FUNC_KEYWORD = 'fun' + + def __init__(self, context, in_when_cases=False): + super().__init__(context) + self._in_when_cases = in_when_cases + + def _state_global(self, token): + if token in ('get', 'set'): + self.context.push_new_function(token) + self._state = self._expect_function_impl + elif token == '->': + if self._in_when_cases: + self.context.add_condition() + else: + self.context.push_new_function("(anonymous)") + self._state = super(KotlinStates, self)._expect_function_impl + elif token in ('val', 'var', ','): + self._state = self._expect_declaration_name + elif token == 'interface': + self._state = self._interface + elif token == 'when': + self._state = self._when_cases + else: + super(KotlinStates, self)._state_global(token) + + def _expect_declaration_name(self, token): + self._state = self._state_global + + def _expect_function_impl(self, token): + if token == '{' or token == '=': + self.next(self._function_impl, token) + + @CodeStateMachine.read_inside_brackets_then("{}") + def _interface(self, end_token): + if end_token == "}": + self._state = self._state_global + + def _function_name(self, token): + if token == "<": + self.next(self._template, token) + else: + return super(KotlinStates, self)._function_name(token) + + @CodeStateMachine.read_inside_brackets_then("<>", "_function_name") + def _template(self, tokens): + pass + + def _when_cases(self, token): + def callback(): + self.context.add_condition(inc=-1) + self.next(self._state_global) + if token != '{': + return + self.sub_state(KotlinStates(self.context, in_when_cases=True), callback) diff --git a/lizard_languages/swift.py b/lizard_languages/swift.py index dc4eed2..3b1cbcc 100644 --- a/lizard_languages/swift.py +++ b/lizard_languages/swift.py @@ -7,7 +7,24 @@ from .clike import CCppCommentsMixin from .golike import GoLikeStates -class SwiftReader(CodeReader, CCppCommentsMixin): +class SwiftReplaceLabel: + def preprocess(self, tokens): + tokens = list(t for t in tokens if not 
t.isspace() or t == '\n') + + def replace_label(tokens, target, replace): + for i in range(0, len(tokens) - len(target)): + if tokens[i:i + len(target)] == target: + for j, repl in enumerate(replace): + tokens[i + j] = repl + return tokens + + for k in (k for k in self.conditions if k.isalpha()): + tokens = replace_label(tokens, ["(", k, ":"], ["(", "_" + k, ":"]) + tokens = replace_label(tokens, [",", k, ":"], [",", "_" + k, ":"]) + return tokens + + +class SwiftReader(CodeReader, CCppCommentsMixin, SwiftReplaceLabel): # pylint: disable=R0903 FUNC_KEYWORD = 'def' @@ -30,20 +47,6 @@ class SwiftReader(CodeReader, CCppCommentsMixin): r"|\?\?" + addition) - def preprocess(self, tokens): - tokens = list(t for t in tokens if not t.isspace() or t == '\n') - - def replace_label(tokens, target, replace): - for i in range(0, len(tokens) - len(target)): - if tokens[i:i + len(target)] == target: - for j, repl in enumerate(replace): - tokens[i + j] = repl - return tokens - for k in (k for k in self.conditions if k.isalpha()): - tokens = replace_label(tokens, ["(", k, ":"], ["(", "_" + k, ":"]) - tokens = replace_label(tokens, [",", k, ":"], [",", "_" + k, ":"]) - return tokens - class SwiftStates(GoLikeStates): # pylint: disable=R0903 def _state_global(self, token):
terryyin/lizard
b93998d08b9cd08c8de91a65964f7a882e8883c9
diff --git a/test/test_languages/testKotlin.py b/test/test_languages/testKotlin.py new file mode 100644 index 0000000..a9e94bd --- /dev/null +++ b/test/test_languages/testKotlin.py @@ -0,0 +1,226 @@ +import unittest + +from lizard import analyze_file +from lizard_languages import KotlinReader + + +def get_kotlin_function_list(source_code): + return analyze_file.analyze_source_code( + "a.kt", source_code + ).function_list + + +class Test_tokenizing_Kotlin(unittest.TestCase): + + def check_tokens(self, expect, source): + tokens = list(KotlinReader.generate_tokens(source)) + self.assertEqual(expect, tokens) + + def test_dollar_var(self): + self.check_tokens(['`a`'], '`a`') + + +class Test_parser_for_Kotlin(unittest.TestCase): + + def test_empty(self): + functions = get_kotlin_function_list("") + self.assertEqual(0, len(functions)) + + def test_no_function(self): + result = get_kotlin_function_list(''' + for name in names { + println("Hello, \\(name)!") + } + ''') + self.assertEqual(0, len(result)) + + def test_one_function(self): + result = get_kotlin_function_list(''' + fun sayGoodbye() { } + ''') + self.assertEqual(1, len(result)) + self.assertEqual("sayGoodbye", result[0].name) + self.assertEqual(0, result[0].parameter_count) + self.assertEqual(1, result[0].cyclomatic_complexity) + + def test_one_with_parameter(self): + result = get_kotlin_function_list(''' + fun sayGoodbye(personName: String, alreadyGreeted: Bool) { } + ''') + self.assertEqual(1, len(result)) + self.assertEqual("sayGoodbye", result[0].name) + self.assertEqual(2, result[0].parameter_count) + + def test_one_function_with_return_value(self): + result = get_kotlin_function_list(''' + fun sayGoodbye(): String {return "bye"} + ''') + self.assertEqual(1, len(result)) + self.assertEqual("sayGoodbye", result[0].name) + + def test_one_lambda_with_return_value(self): + result = get_kotlin_function_list(''' + val sayGoodbye: () -> String = {"bye"} + ''') + self.assertEqual(1, len(result)) + self.assertEqual("(anonymous)", result[0].name) + + def test_one_function_with_complexity(self): + result = get_kotlin_function_list(''' + fun sayGoodbye() { if ++diceRoll == 7 { diceRoll = 1 }} + ''') + self.assertEqual(2, result[0].cyclomatic_complexity) + + def test_interface(self): + result = get_kotlin_function_list(''' + interface p { + fun f1(): String + fun f2() + } + fun sayGoodbye() { } + ''') + self.assertEqual(1, len(result)) + self.assertEqual("sayGoodbye", result[0].name) + + def test_interface_followed_by_a_class(self): + result = get_kotlin_function_list(''' + interface p { + fun f1(): String + fun f2() + } + class c { } + ''') + self.assertEqual(0, len(result)) + + def test_interface_with_vars(self): + result = get_kotlin_function_list(''' + interface p { + fun f1(): String + fun f2() + val p1: String + val p2: String + get() = "p2" + } + class c { } + ''') + self.assertEqual(0, len(result)) + + def test_getter(self): + result = get_kotlin_function_list(''' + class Time + { + var seconds: Double = 17.0 + var minutes: Double + get() = seconds / 60 + } + ''') + self.assertEqual("get", result[0].name) + + def test_getter_setter(self): + result = get_kotlin_function_list(''' + class Time + { + var seconds: Double = 17.0 + var minutes: Double + get() = seconds / 60 + set(newValue) { + this.seconds = (newValue * 60) + } + } + ''') + self.assertEqual("get", result[1].name) + self.assertEqual("set", result[0].name) + + # https://docs.kotlin.org/kotlin-book/LanguageGuide/Properties.html#ID259 + def test_explicit_getter_setter(self): + 
result = get_kotlin_function_list(''' + var center: Point + get() = { + val centerX = origin.x + (size.width / 2) + val centerY = origin.y + (size.height / 2) + return Point(x: centerX, y: centerY) + } + set(newCenter) { + origin.x = newCenter.x - (size.width / 2) + origin.y = newCenter.y - (size.height / 2) + } + } + ''') + self.assertEqual("set", result[0].name) + self.assertEqual("get", result[1].name) + + def test_when_cases(self): + result = get_kotlin_function_list(''' + fun cases(x: Int) { + when (x) { + 0, 1 -> print("x == 0 or x == 1") + else -> print("otherwise") + } + } + ''') + self.assertEqual("cases", result[0].name) + self.assertEqual(2, result[0].cyclomatic_complexity) + + def test_keyword_declarations(self): + result = get_kotlin_function_list(''' + enum class Func { + static var `class`: Bool? = false + static val `interface` = 0 + fun `get`() {} + } + ''') + self.assertEqual("`get`", result[0].name) + + def test_generic_function(self): + result = get_kotlin_function_list(''' + fun <T> f() {} + ''') + self.assertEqual("f", result[0].name) + + def test_complex_generic_function(self): + result = get_kotlin_function_list(''' + fun <C1, C2> f (c1: C1, c: C2): Boolean where C2 : Container {return C2.isEmpty()} + ''') + self.assertEqual("f", result[0].name) + self.assertEqual(2, result[0].parameter_count) + + def test_elvis_operator(self): + result = get_kotlin_function_list(''' fun f() { + val keep = filteredList?.contains(ingredient) ?: true + } + ''') + self.assertEqual("f", result[0].name) + self.assertEqual(2, result[0].cyclomatic_complexity) + + def test_for_label(self): + result = get_kotlin_function_list(''' + fun f0() { something(for: .something) } + fun f1() { something(for :.something) } + fun f2() { something(for : .something) } + fun f3() { something(for: if (isValid) true else false) } + fun f4() { something(label1: .something, label2: .something, for: .something) } + ''') + self.assertEqual(1, result[0].cyclomatic_complexity) + self.assertEqual(1, result[1].cyclomatic_complexity) + self.assertEqual(1, result[2].cyclomatic_complexity) + self.assertEqual(2, result[3].cyclomatic_complexity) + self.assertEqual(1, result[4].cyclomatic_complexity) + + def test_nested(self): + result = get_kotlin_function_list(''' + fun bar() : Int { + fun a() : Int { + // Do a load of stuff + return 1 + } + fun b() : Int { + // Do a load of stuff + return 1 + } + return a() + b() + } + ''') + self.assertEqual(3, len(result)) + self.assertEqual("a", result[0].name) + self.assertEqual("b", result[1].name) + self.assertEqual("bar", result[2].name)
Kotlin support for lizard?

Hi. Any plans for lizard to be used for Kotlin?
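For a concrete sense of what the requested support ends up looking like, the test patch in this row drives the new reader through lizard's `analyze_file.analyze_source_code` entry point. A minimal sketch of that usage, assuming the same API as the tests (the Kotlin snippet and expected output are illustrative):

```python
from lizard import analyze_file

# The ".kt" extension routes the source to the Kotlin reader.
source = '''
fun greet(name: String) {
    if (name.isNotEmpty()) println(name)
}
'''
for fn in analyze_file.analyze_source_code("a.kt", source).function_list:
    print(fn.name, fn.cyclomatic_complexity)  # expect: greet 2
```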
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_languages/testKotlin.py::Test_tokenizing_Kotlin::test_dollar_var", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_complex_generic_function", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_elvis_operator", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_empty", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_explicit_getter_setter", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_for_label", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_generic_function", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_getter", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_getter_setter", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_interface", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_interface_followed_by_a_class", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_interface_with_vars", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_keyword_declarations", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_nested", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_no_function", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_one_function", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_one_function_with_complexity", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_one_function_with_return_value", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_one_lambda_with_return_value", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_one_with_parameter", "test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_when_cases" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-09-06T10:23:16Z"
mit
testing-cabal__fixtures-58
diff --git a/NEWS b/NEWS index af2c00c..1439d53 100644 --- a/NEWS +++ b/NEWS @@ -7,6 +7,7 @@ NEXT * Dropped support for Python 2.7, Python 3.4 and Python 3.5 (EOL). * Added support for Python 3.7-3.10. +* Support all ``subprocess.Popen`` arguments up to Python 3.10. 3.0.0 ~~~~~ diff --git a/fixtures/_fixtures/popen.py b/fixtures/_fixtures/popen.py index c35ed5e..ffa9bf4 100644 --- a/fixtures/_fixtures/popen.py +++ b/fixtures/_fixtures/popen.py @@ -20,6 +20,7 @@ __all__ = [ import random import subprocess +import sys from fixtures import Fixture @@ -126,13 +127,38 @@ class FakePopen(Fixture): stdin=_unpassed, stdout=_unpassed, stderr=_unpassed, preexec_fn=_unpassed, close_fds=_unpassed, shell=_unpassed, cwd=_unpassed, env=_unpassed, universal_newlines=_unpassed, - startupinfo=_unpassed, creationflags=_unpassed): + startupinfo=_unpassed, creationflags=_unpassed, + restore_signals=_unpassed, start_new_session=_unpassed, + pass_fds=_unpassed, *, group=_unpassed, extra_groups=_unpassed, + user=_unpassed, umask=_unpassed, encoding=_unpassed, + errors=_unpassed, text=_unpassed, pipesize=_unpassed): + # Reject arguments introduced by newer versions of Python in older + # versions; this makes it harder to accidentally hide compatibility + # problems using test doubles. + if sys.version_info < (3, 7) and text is not FakePopen._unpassed: + raise TypeError( + "FakePopen.__call__() got an unexpected keyword argument " + "'text'") + if sys.version_info < (3, 9): + for arg_name in "group", "extra_groups", "user", "umask": + if locals()[arg_name] is not FakePopen._unpassed: + raise TypeError( + "FakePopen.__call__() got an unexpected keyword " + "argument '{}'".format(arg_name)) + if sys.version_info < (3, 10) and pipesize is not FakePopen._unpassed: + raise TypeError( + "FakePopen.__call__() got an unexpected keyword argument " + "'pipesize'") + proc_args = dict(args=args) local = locals() for param in [ "bufsize", "executable", "stdin", "stdout", "stderr", "preexec_fn", "close_fds", "shell", "cwd", "env", - "universal_newlines", "startupinfo", "creationflags"]: + "universal_newlines", "startupinfo", "creationflags", + "restore_signals", "start_new_session", "pass_fds", "group", + "extra_groups", "user", "umask", "encoding", "errors", "text", + "pipesize"]: if local[param] is not FakePopen._unpassed: proc_args[param] = local[param] proc_info = self.get_info(proc_args)
testing-cabal/fixtures
7aa50f2059dd09cc4321462e5e24310d223c3350
diff --git a/fixtures/tests/_fixtures/test_popen.py b/fixtures/tests/_fixtures/test_popen.py index b0af3d3..cafd98e 100644 --- a/fixtures/tests/_fixtures/test_popen.py +++ b/fixtures/tests/_fixtures/test_popen.py @@ -15,6 +15,7 @@ import io import subprocess +import sys import testtools @@ -48,19 +49,59 @@ class TestFakePopen(testtools.TestCase, TestWithFixtures): proc = fixture(['foo']) self.assertEqual('stdout', proc.stdout) - def test_handles_all_2_7_args(self): + def test_handles_all_Popen_args(self): all_args = dict( args="args", bufsize="bufsize", executable="executable", stdin="stdin", stdout="stdout", stderr="stderr", preexec_fn="preexec_fn", close_fds="close_fds", shell="shell", cwd="cwd", env="env", universal_newlines="universal_newlines", - startupinfo="startupinfo", creationflags="creationflags") + startupinfo="startupinfo", creationflags="creationflags", + restore_signals="restore_signals", + start_new_session="start_new_session", pass_fds="pass_fds", + encoding="encoding", errors="errors") + if sys.version_info >= (3, 7): + all_args["text"] = "text" + if sys.version_info >= (3, 9): + all_args["group"] = "group" + all_args["extra_groups"] = "extra_groups" + all_args["user"] = "user" + all_args["umask"] = "umask" + if sys.version_info >= (3, 10): + all_args["pipesize"] = "pipesize" def get_info(proc_args): self.assertEqual(all_args, proc_args) return {} fixture = self.useFixture(FakePopen(get_info)) fixture(**all_args) + @testtools.skipUnless( + sys.version_info < (3, 7), "only relevant on Python <3.7") + def test_rejects_3_7_args_on_older_versions(self): + fixture = self.useFixture(FakePopen(lambda proc_args: {})) + with testtools.ExpectedException( + TypeError, r".* got an unexpected keyword argument 'text'"): + fixture(args="args", text=True) + + @testtools.skipUnless( + sys.version_info < (3, 9), "only relevant on Python <3.9") + def test_rejects_3_9_args_on_older_versions(self): + fixture = self.useFixture(FakePopen(lambda proc_args: {})) + for arg_name in ("group", "extra_groups", "user", "umask"): + kwargs = {arg_name: arg_name} + expected_message = ( + r".* got an unexpected keyword argument '{}'".format(arg_name)) + with testtools.ExpectedException(TypeError, expected_message): + fixture(args="args", **kwargs) + + @testtools.skipUnless( + sys.version_info < (3, 10), "only relevant on Python <3.10") + def test_rejects_3_10_args_on_older_versions(self): + fixture = self.useFixture(FakePopen(lambda proc_args: {})) + with testtools.ExpectedException( + TypeError, + r".* got an unexpected keyword argument 'pipesize'"): + fixture(args="args", pipesize=1024) + def test_custom_returncode(self): def get_info(proc_args): return dict(returncode=1)
`FakePopen` is not fully compatible with Python 3.7

Python 3.7 introduced the `text` parameter for `Popen`; `FakePopen` has not yet been adjusted to this:

https://github.com/testing-cabal/fixtures/blob/a01ce5350a106dbce313b1a6370593227574379d/fixtures/_fixtures/popen.py#L125-L129
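A minimal repro sketch of the incompatibility, patterned on the fixture usage in this row's test patch (the fake command is illustrative):

```python
from fixtures import FakePopen

# FakePopen substitutes subprocess.Popen while the fixture is active.
fake = FakePopen(lambda proc_args: {})
fake.setUp()
try:
    # Real Popen accepts text=... on Python >= 3.7; before this change
    # the fake's __call__ signature did not, so this raised TypeError.
    fake(args=["echo", "hi"], text=True)
finally:
    fake.cleanUp()
```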
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "fixtures/tests/_fixtures/test_popen.py::TestFakePopen::test_handles_all_Popen_args" ]
[ "fixtures/tests/_fixtures/test_popen.py::TestFakePopen::test___call___is_recorded", "fixtures/tests/_fixtures/test_popen.py::TestFakePopen::test_custom_returncode", "fixtures/tests/_fixtures/test_popen.py::TestFakePopen::test_inject_content_stdout", "fixtures/tests/_fixtures/test_popen.py::TestFakePopen::test_installs_restores_global", "fixtures/tests/_fixtures/test_popen.py::TestFakePopen::test_rejects_3_10_args_on_older_versions", "fixtures/tests/_fixtures/test_popen.py::TestFakePopen::test_with_popen_custom", "fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_args", "fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_communicate", "fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_communicate_with_input", "fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_communicate_with_input_and_stdin", "fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_communicate_with_out", "fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_communicate_with_timeout", "fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_kill", "fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_poll", "fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_poll_with_returncode", "fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_wait", "fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_wait_with_timeout_and_endtime" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2022-02-09T10:50:21Z"
apache-2.0
testing-cabal__systemfixtures-9
diff --git a/systemfixtures/filesystem.py b/systemfixtures/filesystem.py index 40d9da0..f26a3ee 100644 --- a/systemfixtures/filesystem.py +++ b/systemfixtures/filesystem.py @@ -12,6 +12,7 @@ if six.PY2: BUILTIN_OPEN = "__builtin__.open" if six.PY3: BUILTIN_OPEN = "builtins.open" + from os import DirEntry GENERIC_APIS = ( @@ -139,6 +140,8 @@ class FakeFilesystem(Fixture): def _is_fake_path_or_fd(self, path, *args, **kwargs): if isinstance(path, int): path = self._path_from_fd(path) + elif isinstance(path, DirEntry): + path = path.name return self._is_fake_path(path) def _is_fake_symlink(self, src, dst, *args, **kwargs):
testing-cabal/systemfixtures
9c0908083a2f8914621ef5068c024ee41f84981a
diff --git a/systemfixtures/tests/test_filesystem.py b/systemfixtures/tests/test_filesystem.py index 5041bb0..ec3d26a 100644 --- a/systemfixtures/tests/test_filesystem.py +++ b/systemfixtures/tests/test_filesystem.py @@ -97,6 +97,12 @@ class FakeFilesystemTest(TestCase): shutil.rmtree("/foo/bar") self.assertEqual([], os.listdir("/foo")) + def test_copytree(self): + self.fs.add("/foo") + shutil.copytree("./doc", "/foo") + self.assertEqual( + sorted(os.listdir("./doc")), sorted(os.listdir("/foo"))) + if six.PY3: def test_listdir_with_fd(self):
shutil.copytree to an overlayed dir fails under Python 3.8

When copying a tree to an overlayed dir, I get the following exception caused by `_is_fake_path` not handling DirEntry params:

```python
shutil.copytree(CHARM_DIR, self.charm_dir)
  File "/usr/lib/python3.8/shutil.py", line 554, in copytree
    return _copytree(entries=entries, src=src, dst=dst, symlinks=symlinks,
  File "/usr/lib/python3.8/shutil.py", line 496, in _copytree
    copy_function(srcobj, dstname)
  File "/usr/lib/python3.8/shutil.py", line 432, in copy2
    copyfile(src, dst, follow_symlinks=follow_symlinks)
  File "/usr/lib/python3.8/shutil.py", line 261, in copyfile
    with open(src, 'rb') as fsrc, open(dst, 'wb') as fdst:
  File "/home/nessita/canonical/franky/env/lib/python3.8/site-packages/systemfixtures/_overlay.py", line 23, in _new_value
    if self.condition(*args, **kwargs):
  File "/home/nessita/canonical/franky/env/lib/python3.8/site-packages/systemfixtures/filesystem.py", line 146, in _is_fake_path_or_fd
    return self._is_fake_path(path)
  File "/home/nessita/canonical/franky/env/lib/python3.8/site-packages/systemfixtures/filesystem.py", line 133, in _is_fake_path
    if path.startswith(prefix):
AttributeError: 'posix.DirEntry' object has no attribute 'startswith'
```

A possible fix would be something like this:

```python
if isinstance(path, os.DirEntry):
    path = path.name
```
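The underlying mismatch is easy to see in isolation: Python 3.8's `copytree` iterates `os.scandir()` and hands `os.DirEntry` objects to `open()`, which accepts them via `os.fspath()`, while a plain string-prefix check does not. A small sketch (run in any non-empty directory):

```python
import os

with os.scandir(".") as it:
    entry = next(it)

# entry is an os.DirEntry: open() accepts it via os.fspath(),
# but str methods like startswith() do not exist on it.
assert not isinstance(entry, str)
print(os.fspath(entry))  # the plain path string the overlay check needs
# entry.startswith("/")  # -> AttributeError, as in the traceback above
```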
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_copytree" ]
[ "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_add", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_add_non_absolute", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_add_sub_paths", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_chmod", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_chown", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_fchown", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_glob", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_listdir_with_fd", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_readlink_to_fake_path", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_readlink_to_real_path", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_rename", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_rmtree", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_sqlite3", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_symlink", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_unlink", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_walk" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2020-10-29T12:39:29Z"
mit
testingautomated-usi__uncertainty-wizard-152
diff --git a/uncertainty_wizard/quantifiers/mean_softmax.py b/uncertainty_wizard/quantifiers/mean_softmax.py index 6c32df0..d8f01f6 100644 --- a/uncertainty_wizard/quantifiers/mean_softmax.py +++ b/uncertainty_wizard/quantifiers/mean_softmax.py @@ -17,7 +17,7 @@ class MeanSoftmax(ConfidenceQuantifier): # docstr-coverage:inherited @classmethod def aliases(cls) -> List[str]: - return ["mean_softmax", "ensembling", "ms"] + return ["mean_softmax", "ensembling", "ms", "MeanSoftmax"] # docstr-coverage:inherited @classmethod diff --git a/uncertainty_wizard/quantifiers/mutual_information.py b/uncertainty_wizard/quantifiers/mutual_information.py index adfe725..5a0bc11 100644 --- a/uncertainty_wizard/quantifiers/mutual_information.py +++ b/uncertainty_wizard/quantifiers/mutual_information.py @@ -30,7 +30,7 @@ class MutualInformation(UncertaintyQuantifier): # docstr-coverage:inherited @classmethod def aliases(cls) -> List[str]: - return ["mutu_info", "mutual_information", "mi"] + return ["mutu_info", "mutual_information", "mi", "MutualInformation"] # docstr-coverage:inherited @classmethod diff --git a/uncertainty_wizard/quantifiers/predictive_entropy.py b/uncertainty_wizard/quantifiers/predictive_entropy.py index d29c39b..8a26a64 100644 --- a/uncertainty_wizard/quantifiers/predictive_entropy.py +++ b/uncertainty_wizard/quantifiers/predictive_entropy.py @@ -44,7 +44,7 @@ class PredictiveEntropy(UncertaintyQuantifier): # docstr-coverage:inherited @classmethod def aliases(cls) -> List[str]: - return ["predictive_entropy", "pred_entropy", "PE"] + return ["predictive_entropy", "pred_entropy", "PE", "PredictiveEntropy"] # docstr-coverage:inherited @classmethod diff --git a/uncertainty_wizard/quantifiers/variation_ratio.py b/uncertainty_wizard/quantifiers/variation_ratio.py index 6083373..aec4c46 100644 --- a/uncertainty_wizard/quantifiers/variation_ratio.py +++ b/uncertainty_wizard/quantifiers/variation_ratio.py @@ -30,7 +30,7 @@ class VariationRatio(UncertaintyQuantifier): # docstr-coverage:inherited @classmethod def aliases(cls) -> List[str]: - return ["variation_ratio", "vr", "var_ratio"] + return ["variation_ratio", "vr", "var_ratio", "VariationRatio"] # docstr-coverage:inherited @classmethod
testingautomated-usi/uncertainty-wizard
04fbec4de6c8f9ab70d7cd38891a225204706c11
diff --git a/tests_unit/quantifiers_tests/test_mean_softmax.py b/tests_unit/quantifiers_tests/test_mean_softmax.py index 02f1cfd..0b03011 100644 --- a/tests_unit/quantifiers_tests/test_mean_softmax.py +++ b/tests_unit/quantifiers_tests/test_mean_softmax.py @@ -17,6 +17,8 @@ class TestMeanSoftmax(TestCase): isinstance(QuantifierRegistry.find("mean_softmax"), MeanSoftmax) ) self.assertTrue(isinstance(QuantifierRegistry.find("ensembling"), MeanSoftmax)) + self.assertTrue(isinstance(QuantifierRegistry.find("MS"), MeanSoftmax)) + self.assertTrue(isinstance(QuantifierRegistry.find("MeanSoftmax"), MeanSoftmax)) def test_is_confidence(self): self.assertTrue(MeanSoftmax.is_confidence()) diff --git a/tests_unit/quantifiers_tests/test_mutual_information.py b/tests_unit/quantifiers_tests/test_mutual_information.py index 7eb2c11..fafc4dc 100644 --- a/tests_unit/quantifiers_tests/test_mutual_information.py +++ b/tests_unit/quantifiers_tests/test_mutual_information.py @@ -19,6 +19,10 @@ class TestMutualInformation(TestCase): self.assertTrue( isinstance(QuantifierRegistry.find("mutu_info"), MutualInformation) ) + self.assertTrue(isinstance(QuantifierRegistry.find("MI"), MutualInformation)) + self.assertTrue( + isinstance(QuantifierRegistry.find("MutualInformation"), MutualInformation) + ) def test_is_confidence(self): self.assertFalse(MutualInformation.is_confidence()) diff --git a/tests_unit/quantifiers_tests/test_one_shot_classifiers.py b/tests_unit/quantifiers_tests/test_one_shot_classifiers.py index 85bf274..1b2b6a2 100644 --- a/tests_unit/quantifiers_tests/test_one_shot_classifiers.py +++ b/tests_unit/quantifiers_tests/test_one_shot_classifiers.py @@ -63,6 +63,12 @@ class TestPCS(TestCase): self.assertTrue( isinstance(QuantifierRegistry.find("PCS"), PredictionConfidenceScore) ) + self.assertTrue( + isinstance( + QuantifierRegistry.find("PredictionConfidenceScore"), + PredictionConfidenceScore, + ) + ) self.assertTrue( isinstance( QuantifierRegistry.find("prediction_confidence_score"), @@ -140,6 +146,7 @@ class TestSoftmax(TestCase): def test_string_representation(self): self.assertTrue(isinstance(QuantifierRegistry.find("softmax"), MaxSoftmax)) self.assertTrue(isinstance(QuantifierRegistry.find("max_softmax"), MaxSoftmax)) + self.assertTrue(isinstance(QuantifierRegistry.find("MaxSoftmax"), MaxSoftmax)) def test_is_confidence(self): self.assertTrue(MaxSoftmax.is_confidence()) @@ -213,6 +220,7 @@ class TestSoftmaxEntropy(TestCase): self.assertTrue( isinstance(QuantifierRegistry.find("SoftmaxEntropy"), SoftmaxEntropy) ) + self.assertTrue(isinstance(QuantifierRegistry.find("SE"), SoftmaxEntropy)) def test_is_confidence(self): self.assertFalse(SoftmaxEntropy.is_confidence()) diff --git a/tests_unit/quantifiers_tests/test_predictive_entropy.py b/tests_unit/quantifiers_tests/test_predictive_entropy.py index cebcf67..60be40f 100644 --- a/tests_unit/quantifiers_tests/test_predictive_entropy.py +++ b/tests_unit/quantifiers_tests/test_predictive_entropy.py @@ -19,6 +19,10 @@ class TestPredictiveEntropy(TestCase): self.assertTrue( isinstance(QuantifierRegistry.find("pred_entropy"), PredictiveEntropy) ) + self.assertTrue(isinstance(QuantifierRegistry.find("PE"), PredictiveEntropy)) + self.assertTrue( + isinstance(QuantifierRegistry.find("PredictiveEntropy"), PredictiveEntropy) + ) def test_is_confidence(self): self.assertFalse(PredictiveEntropy.is_confidence()) diff --git a/tests_unit/quantifiers_tests/test_stddev.py b/tests_unit/quantifiers_tests/test_stddev.py index df43465..dbbea1c 100644 --- 
a/tests_unit/quantifiers_tests/test_stddev.py +++ b/tests_unit/quantifiers_tests/test_stddev.py @@ -17,6 +17,9 @@ class TestStandardDeviation(TestCase): isinstance(QuantifierRegistry.find("standard_deviation"), StandardDeviation) ) self.assertTrue(isinstance(QuantifierRegistry.find("std"), StandardDeviation)) + self.assertTrue( + isinstance(QuantifierRegistry.find("StandardDeviation"), StandardDeviation) + ) self.assertTrue( isinstance(QuantifierRegistry.find("stddev"), StandardDeviation) ) diff --git a/tests_unit/quantifiers_tests/test_variation_ratio.py b/tests_unit/quantifiers_tests/test_variation_ratio.py index 8462857..a814631 100644 --- a/tests_unit/quantifiers_tests/test_variation_ratio.py +++ b/tests_unit/quantifiers_tests/test_variation_ratio.py @@ -20,6 +20,9 @@ class TestVariationRatio(TestCase): isinstance(QuantifierRegistry.find("var_ratio"), VariationRatio) ) self.assertTrue(isinstance(QuantifierRegistry.find("VR"), VariationRatio)) + self.assertTrue( + isinstance(QuantifierRegistry.find("VariationRatio"), VariationRatio) + ) def test_is_confidence(self): self.assertFalse(VariationRatio.is_confidence())
Alias `VariationRatio` is missing

The VariationRatio quantifier misses the corresponding class-name alias, which should be there according to the docs: https://uncertainty-wizard.readthedocs.io/en/latest/user_guide_quantifiers.html
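A sketch of the lookup the docs promise, based on the `QuantifierRegistry.find` calls in this row's test patch; note the exact import path below is an assumption:

```python
# Import path assumed from the package layout in this row's patch.
from uncertainty_wizard.quantifiers import QuantifierRegistry, VariationRatio

# Aliases resolve through the registry; the class-name spelling was the
# one alias missing from VariationRatio.aliases() before this fix.
assert isinstance(QuantifierRegistry.find("VariationRatio"), VariationRatio)
```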
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests_unit/quantifiers_tests/test_mean_softmax.py::TestMeanSoftmax::test_string_representation", "tests_unit/quantifiers_tests/test_mutual_information.py::TestMutualInformation::test_string_representation", "tests_unit/quantifiers_tests/test_predictive_entropy.py::TestPredictiveEntropy::test_string_representation", "tests_unit/quantifiers_tests/test_variation_ratio.py::TestVariationRatio::test_string_representation" ]
[ "tests_unit/quantifiers_tests/test_mean_softmax.py::TestMeanSoftmax::test_as_confidence_flag", "tests_unit/quantifiers_tests/test_mean_softmax.py::TestMeanSoftmax::test_is_confidence", "tests_unit/quantifiers_tests/test_mean_softmax.py::TestMeanSoftmax::test_problem_type", "tests_unit/quantifiers_tests/test_mean_softmax.py::TestMeanSoftmax::test_samples_type_declaration", "tests_unit/quantifiers_tests/test_mean_softmax.py::TestMeanSoftmax::test_single_input_no_entropy", "tests_unit/quantifiers_tests/test_mean_softmax.py::TestMeanSoftmax::test_two_inputs_high_pred_entropy", "tests_unit/quantifiers_tests/test_mutual_information.py::TestMutualInformation::test_as_confidence_flag", "tests_unit/quantifiers_tests/test_mutual_information.py::TestMutualInformation::test_is_confidence", "tests_unit/quantifiers_tests/test_mutual_information.py::TestMutualInformation::test_problem_type", "tests_unit/quantifiers_tests/test_mutual_information.py::TestMutualInformation::test_samples_type_declaration", "tests_unit/quantifiers_tests/test_mutual_information.py::TestMutualInformation::test_single_input_no_entropy", "tests_unit/quantifiers_tests/test_mutual_information.py::TestMutualInformation::test_two_inputs_high_pred_entropy", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestDeepGini::test_is_confidence", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestDeepGini::test_problem_type", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestDeepGini::test_quantification", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestDeepGini::test_samples_type_declaration", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestDeepGini::test_string_representation", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_duplicate_non_winner", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_duplicate_winner", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_happy_path_batch", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_happy_path_single", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_is_confidence", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_problem_type", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_samples_type_declaration", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_string_representation", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_duplicate_non_winner", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_duplicate_winner", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_happy_path_batch", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_happy_path_single", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_is_confidence", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_problem_type", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_samples_type_declaration", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_string_representation", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_duplicate_non_winner", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_duplicate_winner", 
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_happy_path_batch", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_happy_path_single", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_is_confidence", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_problem_type", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_samples_type_declaration", "tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_string_representation", "tests_unit/quantifiers_tests/test_predictive_entropy.py::TestPredictiveEntropy::test_is_confidence", "tests_unit/quantifiers_tests/test_predictive_entropy.py::TestPredictiveEntropy::test_problem_type", "tests_unit/quantifiers_tests/test_predictive_entropy.py::TestPredictiveEntropy::test_samples_type_declaration", "tests_unit/quantifiers_tests/test_predictive_entropy.py::TestPredictiveEntropy::test_single_input_no_entropy", "tests_unit/quantifiers_tests/test_predictive_entropy.py::TestPredictiveEntropy::test_two_inputs_high_pred_entropy", "tests_unit/quantifiers_tests/test_stddev.py::TestStandardDeviation::test_happy_path_single", "tests_unit/quantifiers_tests/test_stddev.py::TestStandardDeviation::test_is_confidence", "tests_unit/quantifiers_tests/test_stddev.py::TestStandardDeviation::test_problem_type", "tests_unit/quantifiers_tests/test_stddev.py::TestStandardDeviation::test_samples_type_declaration", "tests_unit/quantifiers_tests/test_stddev.py::TestStandardDeviation::test_string_representation", "tests_unit/quantifiers_tests/test_variation_ratio.py::TestVariationRatio::test_happy_path_batch", "tests_unit/quantifiers_tests/test_variation_ratio.py::TestVariationRatio::test_happy_path_single", "tests_unit/quantifiers_tests/test_variation_ratio.py::TestVariationRatio::test_is_confidence", "tests_unit/quantifiers_tests/test_variation_ratio.py::TestVariationRatio::test_problem_type", "tests_unit/quantifiers_tests/test_variation_ratio.py::TestVariationRatio::test_samples_type_declaration" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-02-03T08:42:43Z"
mit
textile__python-textile-41
diff --git a/textile/core.py b/textile/core.py index 692cca4..9c6623f 100644 --- a/textile/core.py +++ b/textile/core.py @@ -230,7 +230,7 @@ class Textile(object): self.unreferencedNotes = OrderedDict() self.notelist_cache = OrderedDict() - if text == '': + if text.strip() == '': return text if self.restricted: @@ -811,7 +811,7 @@ class Textile(object): """If we find a closing square bracket we are going to see if it is balanced. If it is balanced with matching opening bracket then it is part of the URL else we spit it back out of the URL.""" - # If counts['['] is None, count the occurrences of '[' + # If counts['['] is None, count the occurrences of '[' counts['['] = counts['['] or url.count('[') if counts['['] == counts[']']:
textile/python-textile
f2a9408cdeea585861d76f6fa85e1ba37c9d011f
diff --git a/tests/test_github_issues.py b/tests/test_github_issues.py index bf9c339..b8a8330 100644 --- a/tests/test_github_issues.py +++ b/tests/test_github_issues.py @@ -91,3 +91,9 @@ def test_github_issue_36(): result = textile.textile(text) expect = '\t<p><a href="https://www.google.com/search?q=Chögyam+Trungpa">Chögyam Trungpa</a></p>' assert result == expect + +def test_github_issue_40(): + text = '\r\n' + result = textile.textile(text) + expect = '\r\n' + assert result == expect
IndexError string index out of range on whitespace only string

When I try to process a string that is whitespace only, I get an `IndexError`:

```
In [1]: from textile import textile

In [2]: textile(' ')
---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
```

Ref: https://github.com/textile/python-textile/issues/26
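Per the patch and test in this row, the fix makes whitespace-only input round-trip unchanged instead of raising; a quick check of the fixed behaviour:

```python
import textile

# After the fix, whitespace-only strings are returned as-is.
assert textile.textile('\r\n') == '\r\n'
assert textile.textile('   ') == '   '
```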
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_github_issues.py::test_github_issue_40" ]
[ "tests/test_github_issues.py::test_github_issue_16", "tests/test_github_issues.py::test_github_issue_17", "tests/test_github_issues.py::test_github_issue_20", "tests/test_github_issues.py::test_github_issue_21", "tests/test_github_issues.py::test_github_issue_22", "tests/test_github_issues.py::test_github_issue_26", "tests/test_github_issues.py::test_github_issue_27", "tests/test_github_issues.py::test_github_issue_28", "tests/test_github_issues.py::test_github_issue_36" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2017-03-30T21:26:10Z"
mit
theavey__ParaTemp-31
diff --git a/paratemp/coordinate_analysis.py b/paratemp/coordinate_analysis.py index a204645..924d998 100644 --- a/paratemp/coordinate_analysis.py +++ b/paratemp/coordinate_analysis.py @@ -112,7 +112,7 @@ class Universe(MDa.Universe): if overwrite or ('/'+time not in store.keys()): store[time] = self._data else: - store_cols = store.get_node(time).axis0.read() + store_cols = store.get_node(time).axis0.read().astype(str) set_diff_cols = set(self._data.columns).difference(store_cols) if not set_diff_cols: if self._verbosity: @@ -160,10 +160,12 @@ class Universe(MDa.Universe): if self._verbosity: print('No data to read in ' '{}[{}]'.format(filename, time)) + return for key in keys_to_read: self._data[key] = read_df[key] def calculate_distances(self, recalculate=False, ignore_file_change=False, + read_data=True, save_data=True, *args, **kwargs): """ Calculate distances by iterating through the trajectory @@ -180,6 +182,13 @@ class Universe(MDa.Universe): the file has changed will be printed. If False, if the length of the trajectory has changed, FileChangedError will be raised. + :param bool read_data: Default: True. + If True, :func:`read_data` will be used to read any data in the + default file with `ignore_no_data=True`. + :param bool save_data: Default: True. + If True, :func:`save_data` will be used to save the calculated + distances to the default file. + Nothing will be saved if there is nothing new to calculate. :param args: :param kwargs: :return: None @@ -190,6 +199,11 @@ class Universe(MDa.Universe): # TODO document this function # TODO find a way to take keyword type args with non-valid python # identifiers (e.g., "O-O"). + if read_data: + v = self._verbosity + self._verbosity = False + self.read_data(ignore_no_data=True) + self._verbosity = v # Make empty atom selections to be appended to: first_group = self.select_atoms('protein and not protein') second_group = self.select_atoms('protein and not protein') @@ -270,6 +284,8 @@ class Universe(MDa.Universe): result=dists[i]) for i, column in enumerate(column_names): self._data[column] = dists[:, i] + if save_data: + self.save_data() def calculate_dihedrals(self, *args, **kwargs): """""" diff --git a/requirements.txt b/requirements.txt index 4783d85..6719eeb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,6 +6,7 @@ matplotlib panedr py gromacswrapper +tables typing scipy six diff --git a/setup.py b/setup.py index 141b571..fc3bb71 100644 --- a/setup.py +++ b/setup.py @@ -20,6 +20,7 @@ setup( 'matplotlib', 'panedr', 'gromacswrapper', + 'tables', 'typing', 'scipy', 'six',
theavey/ParaTemp
a8c11fea5fd99af1e66418aab6ac1743ea527cce
diff --git a/tests/test_coordinate_analysis.py b/tests/test_coordinate_analysis.py index fc4f174..2ea9283 100644 --- a/tests/test_coordinate_analysis.py +++ b/tests/test_coordinate_analysis.py @@ -24,9 +24,11 @@ from __future__ import absolute_import -import pytest -import numpy as np import matplotlib +import numpy as np +import pandas as pd +import py +import pytest matplotlib.use('agg') @@ -41,24 +43,35 @@ def test_matplotlib_testing_backend(): class TestXTCUniverse(object): @pytest.fixture - def univ(self): + def univ(self, tmpdir): from paratemp import coordinate_analysis as ca - _univ = ca.Universe('tests/test-data/spc2.gro', - 'tests/test-data/t-spc2-traj.xtc', - temp=205.) + gro = py.path.local('tests/test-data/spc2.gro') + traj = py.path.local('tests/test-data/t-spc2-traj.xtc') + gro.copy(tmpdir) + traj.copy(tmpdir) + with tmpdir.as_cwd(): + _univ = ca.Universe(gro.basename, + traj.basename, + temp=205.) return _univ @pytest.fixture def univ_w_a(self, univ): - univ.calculate_distances(a='4 5') + univ.calculate_distances(a='4 5', + read_data=False, save_data=False) return univ @pytest.fixture - def univ_pbc(self): + def univ_pbc(self, tmpdir): from paratemp import coordinate_analysis as ca - _univ = ca.Universe('tests/test-data/spc2.gro', - 'tests/test-data/spc2-traj-pbc.xtc', - temp=205.) + gro = py.path.local('tests/test-data/spc2.gro') + traj = py.path.local('tests/test-data/spc2-traj-pbc.xtc') + gro.copy(tmpdir) + traj.copy(tmpdir) + with tmpdir.as_cwd(): + _univ = ca.Universe(gro.basename, + traj.basename, + temp=205.) return _univ @pytest.fixture @@ -93,19 +106,23 @@ class TestXTCUniverse(object): return np.load('tests/ref-data/spc2-fes1d-bins-20.npy') def test_distance_str(self, univ, ref_a_dists): - univ.calculate_distances(a='4 5') + univ.calculate_distances(a='4 5', + read_data=False, save_data=False) assert np.isclose(ref_a_dists, univ.data['a']).all() def test_distance_list_int(self, univ, ref_a_dists): - univ.calculate_distances(a=[4, 5]) + univ.calculate_distances(a=[4, 5], + read_data=False, save_data=False) assert np.isclose(ref_a_dists, univ.data['a']).all() def test_distance_list_str(self, univ, ref_a_dists): - univ.calculate_distances(a=['4', '5']) + univ.calculate_distances(a=['4', '5'], + read_data=False, save_data=False) assert np.isclose(ref_a_dists, univ.data['a']).all() def test_calculate_distances_no_recalc(self, univ_w_a, capsys): - univ_w_a.calculate_distances(a=[4, 5]) + univ_w_a.calculate_distances(a=[4, 5], + read_data=False, save_data=False) out, err = capsys.readouterr() assert out == 'Nothing (new) to calculate here.\n' @@ -113,11 +130,13 @@ class TestXTCUniverse(object): """ :type univ_w_a: paratemp.coordinate_analysis.Universe """ - univ_w_a.calculate_distances(a='5 5', recalculate=True) + univ_w_a.calculate_distances(a='5 5', recalculate=True, + read_data=False, save_data=False) assert (np.array([0., 0.]) == univ_w_a.data['a']).all() def test_distance_pbc(self, univ_pbc, ref_a_pbc_dists): - univ_pbc.calculate_distances(a='4 5') + univ_pbc.calculate_distances(a='4 5', + read_data=False, save_data=False) assert np.isclose(ref_a_pbc_dists['a'], univ_pbc.data['a']).all() def test_calc_fes_1d(self, univ_w_a, ref_delta_g, ref_bins, ref_delta_g_20, @@ -145,7 +164,7 @@ class TestXTCUniverse(object): def test_fes_1d_data_str(self, univ_w_a, ref_delta_g, ref_bins): """ - :type univ_w_a: paratemp.coordinate_analysiss.Universe + :type univ_w_a: paratemp.coordinate_analysis.Universe :type ref_delta_g: np.ndarray :type ref_bins: np.ndarray """ @@ -174,6 
+193,102 @@ class TestXTCUniverse(object): univ._last_time = 5.1e12 assert univ.final_time_str == '5100ms' + def test_save_data(self, univ_w_a, tmpdir, capsys): + time = 'time_' + str(int(univ_w_a._last_time / 1000)) + 'ns' + f_name = univ_w_a.trajectory.filename.replace('xtc', 'h5') + with tmpdir.as_cwd(): + univ_w_a.save_data() + out, err = capsys.readouterr() + assert tmpdir.join(f_name).exists() + with pd.HDFStore(f_name) as store: + df = store[time] + assert out == 'Saved data to {f_name}[{time}]\n'.format( + f_name=f_name, time=time) + assert np.allclose(df, univ_w_a.data) + + def test_save_data_no_new(self, univ_w_a, tmpdir, capsys): + time = 'time_' + str(int(univ_w_a._last_time / 1000)) + 'ns' + f_name = univ_w_a.trajectory.filename.replace('xtc', 'h5') + with tmpdir.as_cwd(): + univ_w_a.save_data() + capsys.readouterr() + univ_w_a.save_data() + out, err = capsys.readouterr() + assert tmpdir.join(f_name).exists() + with pd.HDFStore(f_name) as store: + df = store[time] + assert out == 'No data added to {f_name}[{time}]\n'.format( + f_name=f_name, time=time) + assert np.allclose(df, univ_w_a.data) + + def test_save_data_add_new(self, univ, univ_w_a, tmpdir, capsys): + time = 'time_' + str(int(univ_w_a._last_time / 1000)) + 'ns' + f_name = univ_w_a.trajectory.filename.replace('xtc', 'h5') + with tmpdir.as_cwd(): + univ_w_a.save_data() + capsys.readouterr() + univ.calculate_distances(b='4 5', save_data=False) + univ.save_data() + out, err = capsys.readouterr() + assert out == 'Saved data to {f_name}[{time}]\n'.format( + f_name=f_name, time=time) + + def test_read_data(self, univ, univ_w_a, tmpdir, capsys): + """ + :type univ_w_a: paratemp.Universe + :type univ: paratemp.Universe + """ + with tmpdir.as_cwd(): + univ_w_a.save_data() + capsys.readouterr() # just so it doesn't print + univ.read_data() + assert (univ_w_a.data == univ.data).all().all() + + def test_read_data_no_data(self, univ, tmpdir, capsys): + """ + :type univ: paratemp.Universe + """ + time = 'time_' + str(int(univ._last_time / 1000)) + 'ns' + f_name = univ.trajectory.filename.replace('xtc', 'h5') + with tmpdir.as_cwd(): + with pytest.raises(IOError, message='This data does not exist!\n' + '{}[{}]\n'.format(f_name, + time)): + univ.read_data() + univ.read_data(ignore_no_data=True) + out, err = capsys.readouterr() + assert out == 'No data to read in {}[{}]\n'.format(f_name, time) + + def test_calculate_distances_save(self, univ, tmpdir, capsys): + """ + :type univ: paratemp.Universe + """ + time = 'time_' + str(int(univ._last_time / 1000)) + 'ns' + f_name = univ.trajectory.filename.replace('xtc', 'h5') + with tmpdir.as_cwd(): + univ.calculate_distances(a='4 5') + out, err = capsys.readouterr() + assert tmpdir.join(f_name).exists() + with pd.HDFStore(f_name) as store: + df = store[time] + assert out == 'Saved data to {f_name}[{time}]\n'.format( + f_name=f_name, time=time) + assert np.allclose(df, univ.data) + + def test_calculate_distances_read(self, univ_w_a, tmpdir, capsys): + """ + :type univ_w_a: paratemp.Universe + """ + with tmpdir.as_cwd(): + univ_w_a.save_data() + capsys.readouterr() + univ_w_a._data = univ_w_a._init_dataframe() + univ_w_a.calculate_distances(a='4 5') + out, err = capsys.readouterr() + assert out == 'Nothing (new) to calculate here.\n' + + + # TODO add further Universe tests # ignore_file_change=True # fes_2d
make read_data and save_data defaults for Universe.calculate...

At least for me, I generally want to run these anyway, so it would make sense to make them run by default to save as much time as possible. I don't imagine it would change the memory usage that much. Plus, if it's saved, it's much easier to read it back in later if the RAM needs to be freed.
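A sketch of the resulting call pattern, based on the keyword arguments this row's patch adds (the file names are illustrative):

```python
from paratemp import coordinate_analysis as ca

univ = ca.Universe('spc2.gro', 'traj.xtc', temp=205.)
# New defaults: cached columns are read from the companion HDF5 store
# before calculating, and new results are saved back afterwards.
univ.calculate_distances(a='4 5')
# Opt out explicitly when you don't want to touch the store:
univ.calculate_distances(b='4 5', read_data=False, save_data=False)
```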
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_coordinate_analysis.py::TestXTCUniverse::test_distance_str", "tests/test_coordinate_analysis.py::TestXTCUniverse::test_distance_list_int", "tests/test_coordinate_analysis.py::TestXTCUniverse::test_distance_list_str", "tests/test_coordinate_analysis.py::TestXTCUniverse::test_calculate_distances_no_recalc", "tests/test_coordinate_analysis.py::TestXTCUniverse::test_calculate_distances_yes_recalc", "tests/test_coordinate_analysis.py::TestXTCUniverse::test_distance_pbc", "tests/test_coordinate_analysis.py::TestXTCUniverse::test_calc_fes_1d", "tests/test_coordinate_analysis.py::TestXTCUniverse::test_fes_1d_data_str", "tests/test_coordinate_analysis.py::TestXTCUniverse::test_fes_1d_data_data" ]
[ "tests/test_coordinate_analysis.py::test_matplotlib_testing_backend", "tests/test_coordinate_analysis.py::TestXTCUniverse::test_final_time_str" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2018-06-12T19:42:20Z"
apache-2.0
theelous3__asks-100
diff --git a/asks/request_object.py b/asks/request_object.py index b9bfd32..6c05b00 100644 --- a/asks/request_object.py +++ b/asks/request_object.py @@ -424,7 +424,8 @@ class RequestProcessor: return c_type, str(len(body)), body - def _dict_to_query(self, data, params=True, base_query=False): + @staticmethod + def _dict_to_query(data, params=True, base_query=False): ''' Turns python dicts in to valid body-queries or queries for use directly in the request url. Unlike the stdlib quote() and it's variations, @@ -439,7 +440,7 @@ class RequestProcessor: query = [] for k, v in data.items(): - if not v: + if v is None: continue if isinstance(v, (str, Number)): query.append('='.join(quote_plus(x) for x in (k, str(v))))
theelous3/asks
733f277b7fa84e17afb4d8c3b0895bec7efb8ab4
diff --git a/tests/test_request_object.py b/tests/test_request_object.py index 0f1c989..9627a99 100644 --- a/tests/test_request_object.py +++ b/tests/test_request_object.py @@ -1,6 +1,7 @@ # pylint: disable=no-member import h11 +import pytest from asks.request_object import RequestProcessor @@ -32,3 +33,13 @@ def test_http1_1(monkeypatch): def test_http1_0(monkeypatch): response = _catch_response(monkeypatch, [('Connection', 'close')], b'hello') assert response.body == b'hello' + + [email protected](['data', 'query_str'], [ + [{'foo': 'bar', 'spam': None}, '?foo=bar'], + [{'zero': 0}, '?zero=0'], + [{'empty': ''}, '?empty='], + [{'false': False}, '?false=False'], +]) +def test_dict_to_query(data, query_str): + assert RequestProcessor._dict_to_query(data) == query_str
`request_object.RequestProcessor._dict_to_query` skips dict values evaluating to `False`

Test case:

```python
response = await asks.get('https://httpbin.org/get', params={
    'foo': 'bar',
    'zero': 0,
    'empty': '',
    'false': False
})
payload = response.json()
assert 'foo' in payload['args']    # OK
assert 'zero' in payload['args']   # Fail
assert 'empty' in payload['args']  # Fail
assert 'false' in payload['args']  # Fail
```

Erroneous code: https://github.com/theelous3/asks/blob/733f277b7fa84e17afb4d8c3b0895bec7efb8ab4/asks/request_object.py#L442-L443
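The root cause is a truthiness check where only `None` should be skipped; a standalone sketch of the corrected filter, mirroring the patch:

```python
from urllib.parse import quote_plus

def dict_to_query(data):
    query = []
    for k, v in data.items():
        if v is None:   # skip only missing values -- not 0, '' or False
            continue
        query.append('='.join(quote_plus(x) for x in (k, str(v))))
    return '?' + '&'.join(query)

assert dict_to_query({'zero': 0, 'empty': '', 'spam': None}) == '?zero=0&empty='
```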
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_request_object.py::test_dict_to_query[data0-?foo=bar]", "tests/test_request_object.py::test_dict_to_query[data1-?zero=0]", "tests/test_request_object.py::test_dict_to_query[data2-?empty=]", "tests/test_request_object.py::test_dict_to_query[data3-?false=False]" ]
[ "tests/test_request_object.py::test_http1_1", "tests/test_request_object.py::test_http1_0" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
"2019-01-15T08:14:55Z"
mit
theelous3__asks-173
diff --git a/asks/http_utils.py b/asks/http_utils.py index 45ae42d..9d8d56b 100644 --- a/asks/http_utils.py +++ b/asks/http_utils.py @@ -3,18 +3,15 @@ Utilities for handling some aspects of http """ -__all__ = ["decompress", "parse_content_encoding"] +__all__ = ["decompress", "decompress_one", "parse_content_encoding"] -from gzip import decompress as gdecompress -from zlib import decompress as zdecompress +import codecs +from zlib import decompressobj, MAX_WBITS from .utils import processor -_compression_mapping = {"gzip": gdecompress, "deflate": zdecompress} - - def parse_content_encoding(content_encoding: str) -> [str]: compressions = [x.strip() for x in content_encoding.split(",")] return compressions @@ -23,11 +20,47 @@ def parse_content_encoding(content_encoding: str) -> [str]: @processor def decompress(compressions, encoding=None): data = b"" + # https://tools.ietf.org/html/rfc7231 + # "If one or more encodings have been applied to a representation, the + # sender that applied the encodings MUST generate a Content-Encoding + # header field that lists the content codings in the order in which + # they were applied." + # Thus, reversed(compressions). + decompressors = [ + decompress_one(compression) for compression in reversed(compressions) + ] + if encoding: + decompressors.append(make_decoder_shim(encoding)) + while True: + data = yield data + for decompressor in decompressors: + data = decompressor.send(data) + + +# https://tools.ietf.org/html/rfc7230#section-4.2.1 - #section-4.2.3 + +DECOMPRESS_WBITS = { + "deflate": MAX_WBITS, + "gzip": MAX_WBITS + 16, + "x-gzip": MAX_WBITS + 16, +} + + +@processor +def decompress_one(compression): + data = b"" + decompressor = decompressobj(wbits=DECOMPRESS_WBITS[compression]) + while True: + data = yield data + data = decompressor.decompress(data) + yield decompressor.flush() + + +@processor +def make_decoder_shim(encoding): + data = b"" + decoder = codecs.getincrementaldecoder(encoding)(errors="replace") while True: - if encoding: - data = yield data.decode(encoding, errors="replace") - else: - data = yield data - for compression in compressions: - if compression in _compression_mapping: - data = _compression_mapping[compression](data) + data = yield data + data = decoder.decode(data) + yield decoder.decode(b"", final=True)
theelous3/asks
774af51d69ffff0245801d7b4b79a97e6318f5f9
diff --git a/tests/test_http_utils.py b/tests/test_http_utils.py new file mode 100644 index 0000000..026407b --- /dev/null +++ b/tests/test_http_utils.py @@ -0,0 +1,45 @@ +import zlib +import gzip + +import pytest + +from asks import http_utils + +INPUT_DATA = b"abcdefghijklmnopqrstuvwxyz" +UNICODE_INPUT_DATA = "\U0001f408\U0001F431" * 5 + + [email protected]( + "compressor,name", [(zlib.compress, "deflate"), (gzip.compress, "gzip")] +) +def test_decompress_one_zlib(compressor, name): + data = zlib.compress(INPUT_DATA) + decompressor = http_utils.decompress_one("deflate") + result = b"" + for i in range(len(data)): + b = data[i : i + 1] + result += decompressor.send(b) + assert result == INPUT_DATA + + +def test_decompress(): + # we don't expect to see multiple compression types in the wild + # but test anyway + data = zlib.compress(gzip.compress(INPUT_DATA)) + decompressor = http_utils.decompress(["gzip", "deflate"]) + result = b"" + for i in range(len(data)): + b = data[i : i + 1] + result += decompressor.send(b) + assert result == INPUT_DATA + + +def test_decompress_decoding(): + data = zlib.compress(UNICODE_INPUT_DATA.encode("utf-8")) + decompressor = http_utils.decompress(["deflate"], encoding="utf-8") + result = "" + for i in range(len(data)): + b = data[i : i + 1] + res = decompressor.send(b) + result += res + assert result == UNICODE_INPUT_DATA
Chunked and encoded gzip not decompressing correctly in streams From https://github.com/theelous3/asks/issues/95 specifically https://github.com/theelous3/asks/issues/95#issuecomment-435187332 The gzip module blows and isn't sophisticated enough to decompress streams. Drop it in favour of full zlib. https://docs.python.org/3/library/zlib.html#zlib.decompress https://stackoverflow.com/a/22311297
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_http_utils.py::test_decompress_one_zlib[compress-deflate]", "tests/test_http_utils.py::test_decompress_one_zlib[compress-gzip]", "tests/test_http_utils.py::test_decompress", "tests/test_http_utils.py::test_decompress_decoding" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
"2020-09-21T13:37:59Z"
mit
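The patch above works because `zlib.decompressobj` accepts input incrementally, unlike `gzip.decompress`, which needs the whole stream up front and therefore fails on chunked transfer encoding. The `wbits` argument selects the container format: `MAX_WBITS` for deflate and `MAX_WBITS + 16` for gzip. A self-contained sketch of the same streaming pattern outside asks:

```python
import gzip
import zlib

def stream_decompress(chunks, compression='gzip'):
    # wbits selects the container format: +16 enables the gzip header/trailer.
    wbits = zlib.MAX_WBITS + 16 if compression in ('gzip', 'x-gzip') else zlib.MAX_WBITS
    d = zlib.decompressobj(wbits=wbits)
    for chunk in chunks:
        yield d.decompress(chunk)
    yield d.flush()

data = gzip.compress(b'abcdefghijklmnopqrstuvwxyz')
# Feed one byte at a time to simulate a chunked HTTP body.
result = b''.join(stream_decompress(data[i:i + 1] for i in range(len(data))))
assert result == b'abcdefghijklmnopqrstuvwxyz'
```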
thegetty__crom-50
diff --git a/README.md b/README.md index 73b4a2a..bab3576 100644 --- a/README.md +++ b/README.md @@ -18,12 +18,20 @@ Import the classes from the model module. As the classes are dynamically generat ```python from cromulent.model import factory, Group -g1 = Group("Organization") -g2 = Group("Department") +g1 = Group(ident="Organization") +g2 = Group(ident="Department") g1.member = g2 print factory.toString(g1, compact=False) ``` +The constructor for the classes takes the following parameters: + +* `ident` - an identifier to use for this instance. If specified, it should be a URI represented as a string. If it is the empty string, it will result in no identifier. If not specified, or specified as `None`, then it will be auto-generated by the factory if `auto_assign_id` is true, or if `auto_assign_id` is false, then it will result in no identifier. +* `label` - a human readable label for the resource, to act as internal documentation for the data +* `value` or `content` - a data value for the class. Dimensions and MonetaryAmounts use `value` which must be a number, and Name, Identifier, LinguisticObject and similar use `content` which must be a string. +* Additional keywords may be passed in, and will be sent to class-specific initialization code. + + ### Vocabulary ```python @@ -38,6 +46,7 @@ print factory.toString(h, compact=False) * Assigning to the same property repeatedly does NOT overwrite the value, instead it appends. To overwrite a value, instead set it to a false value first. + ### Factory settings There are quite a few settings for how the module works, which are managed by a `factory` object. diff --git a/cromulent/model.py b/cromulent/model.py index f5ab3ae..05e9034 100644 --- a/cromulent/model.py +++ b/cromulent/model.py @@ -358,9 +358,9 @@ class ExternalResource(object): _type = "" _embed = True - def __init__(self, ident=""): + def __init__(self, ident=None): self._factory = factory - if ident: + if ident is not None: if ident.startswith('urn:uuid'): self.id = ident elif ident.startswith('http'): @@ -378,6 +378,9 @@ class ExternalResource(object): ident = "%s:%s" % (self._factory.prefixes_rev[pref], rest) self.id = ident + elif ident == "": + # Allow explicit setting of empty string + self.id = "" else: # Allow for prefixed term curied = ident.split(':', 1) @@ -386,10 +389,10 @@ class ExternalResource(object): self._full_id = self._factory.prefixes[curied[0]] + curied[1] else: self.id = factory.base_url + self.__class__._uri_segment + "/" + ident - elif factory.auto_assign_id: self.id = factory.generate_id(self) else: + # Not auto assigning, and not submitted = blank node self.id = "" def _toJSON(self, done, top=None): @@ -408,7 +411,7 @@ class BaseResource(ExternalResource): _classification = "" _classhier = [] - def __init__(self, ident="", label="", value="", content="", **kw): + def __init__(self, ident=None, label="", value="", content="", **kw): """Initialize BaseObject.""" super(BaseResource, self).__init__(ident)
thegetty/crom
5c812f1a0acd98311143b6f63185d2c3f2cc23b7
diff --git a/tests/test_model.py b/tests/test_model.py index 5500935..657899b 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -210,17 +210,20 @@ class TestBuildClass(unittest.TestCase): class TestAutoIdentifiers(unittest.TestCase): def test_bad_autoid(self): + model.factory.auto_assign_id = True model.factory.auto_id_type = "broken" self.assertRaises(model.ConfigurationError, model.factory.generate_id, "irrelevant") def test_int(self): + model.factory.auto_assign_id = True model.factory.auto_id_type = "int" p = model.Person() p2 = model.Activity() self.assertEqual(int(p.id[-1]), int(p2.id[-1])-1) def test_int_per_type(self): + model.factory.auto_assign_id = True model.factory.auto_id_type = "int-per-type" p = model.Person() p2 = model.Person() @@ -229,6 +232,7 @@ class TestAutoIdentifiers(unittest.TestCase): self.assertEqual(int(p.id[-1]), int(p3.id[-1])) def test_int_per_segment(self): + model.factory.auto_assign_id = True model.factory._auto_id_segments = {} model.factory.auto_id_type = "int-per-segment" model.Activity._uri_segment = model.Person._uri_segment @@ -239,6 +243,7 @@ class TestAutoIdentifiers(unittest.TestCase): self.assertEqual(int(p.id[-1]), int(p3.id[-1])) def test_uuid(self): + model.factory.auto_assign_id = True model.factory.auto_id_type = "uuid" p = model.Person() self.assertTrue(p.id.startswith('urn:uuid:')) @@ -254,6 +259,31 @@ class TestAutoIdentifiers(unittest.TestCase): p4 = model.Person('fish:4') self.assertTrue(p4.id.startswith(model.factory.base_url)) + def test_no_ident(self): + + model.factory.auto_assign_id = True + p1 = model.Person() # auto assigned + p2 = model.Person(ident=None) # auto assigned + p3 = model.Person(ident="") # bnode explicitly + + self.assertTrue(p1.id.startswith('http')) + self.assertTrue(p2.id.startswith('http')) + self.assertEqual(p3.id, '') + + model.factory.auto_assign_id = False + p4 = model.Person() # bnode is default + p5 = model.Person(ident=None) # bnode is default + p6 = model.Person(ident="") # bnode explicitly + + self.assertEqual(p4.id, '') + self.assertEqual(p5.id, '') + self.assertEqual(p6.id, '') + + + + + + class TestBaseResource(unittest.TestCase):
Allow ident="" to create blank nodes WISOTT
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_model.py::TestAutoIdentifiers::test_no_ident" ]
[ "tests/test_model.py::TestFactorySetup::test_base_dir", "tests/test_model.py::TestFactorySetup::test_base_url", "tests/test_model.py::TestFactorySetup::test_default_lang", "tests/test_model.py::TestFactorySetup::test_load_context", "tests/test_model.py::TestFactorySetup::test_pickle", "tests/test_model.py::TestFactorySetup::test_set_debug", "tests/test_model.py::TestFactorySetup::test_set_debug_stream", "tests/test_model.py::TestFactorySerialization::test_breadth", "tests/test_model.py::TestFactorySerialization::test_broken_unicode", "tests/test_model.py::TestFactorySerialization::test_external", "tests/test_model.py::TestFactorySerialization::test_pipe_scoped", "tests/test_model.py::TestFactorySerialization::test_recursion", "tests/test_model.py::TestFactorySerialization::test_string_list", "tests/test_model.py::TestFactorySerialization::test_toJSON", "tests/test_model.py::TestFactorySerialization::test_toJSON_full", "tests/test_model.py::TestFactorySerialization::test_toString", "tests/test_model.py::TestProcessTSV::test_process_tsv", "tests/test_model.py::TestBuildClasses::test_build_classes", "tests/test_model.py::TestBuildClass::test_build_class", "tests/test_model.py::TestAutoIdentifiers::test_bad_autoid", "tests/test_model.py::TestAutoIdentifiers::test_int", "tests/test_model.py::TestAutoIdentifiers::test_int_per_segment", "tests/test_model.py::TestAutoIdentifiers::test_int_per_type", "tests/test_model.py::TestAutoIdentifiers::test_prefixes", "tests/test_model.py::TestAutoIdentifiers::test_uuid", "tests/test_model.py::TestBaseResource::test_check_prop", "tests/test_model.py::TestBaseResource::test_init", "tests/test_model.py::TestBaseResource::test_list_all_props", "tests/test_model.py::TestBaseResource::test_multiplicity", "tests/test_model.py::TestMagicMethods::test_set_magic_resource", "tests/test_model.py::TestMagicMethods::test_set_magic_resource_inverse", "tests/test_model.py::TestMagicMethods::test_validate_multiplicity", "tests/test_model.py::TestMagicMethods::test_validate_profile_off", "tests/test_model.py::TestMagicMethods::test_validation_off", "tests/test_model.py::TestMagicMethods::test_validation_unknown", "tests/test_model.py::TestMagicMethods::test_validation_wrong_type" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2019-05-16T20:53:34Z"
apache-2.0
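The contract the crom-50 patch encodes distinguishes three identifier cases: `ident=None` defers to the factory's `auto_assign_id` setting, `ident=""` explicitly requests a blank node, and any other string is used as (or expanded into) a URI. A reduced sketch of that dispatch, with the factory collapsed into class attributes for illustration and URI prefix expansion omitted:

```python
class Resource:
    auto_assign_id = True          # stands in for factory.auto_assign_id
    _counter = 0

    def __init__(self, ident=None):
        if ident == "":
            self.id = ""           # explicit blank node
        elif ident is not None:
            self.id = ident        # caller-supplied identifier (expansion omitted)
        elif Resource.auto_assign_id:
            Resource._counter += 1
            self.id = "http://example.org/resource/{}".format(Resource._counter)
        else:
            self.id = ""           # not auto-assigning and nothing given: blank node

assert Resource("").id == ""
assert Resource().id.startswith("http")
Resource.auto_assign_id = False
assert Resource().id == ""
```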
thegetty__crom-56
diff --git a/cromulent/model.py b/cromulent/model.py index 12997d8..e2363c1 100644 --- a/cromulent/model.py +++ b/cromulent/model.py @@ -371,10 +371,19 @@ class ExternalResource(object): _type = "" _embed = True + + def _is_uri(self, what): + uri_schemes = ['urn:uuid:', 'tag:', 'data:', 'mailto:', 'info:', 'ftp:/', 'sftp:/'] + for u in uri_schemes: + if what.startswith(u): + return True + return False + + def __init__(self, ident=None): self._factory = factory if ident is not None: - if ident.startswith('urn:uuid'): + if self._is_uri(ident): self.id = ident elif ident.startswith('http'): # Try to find prefixable term @@ -395,7 +404,7 @@ class ExternalResource(object): # Allow explicit setting of empty string self.id = "" else: - # Allow for prefixed term + # Allow for prefixed term that isn't ambiguously a URI curied = ident.split(':', 1) if len(curied) == 2 and curied[0] in self._factory.prefixes: self.id = ident
thegetty/crom
44cbb8103b7c6372c111dd7969e6049ffaa05ad0
diff --git a/tests/test_model.py b/tests/test_model.py index 281d0f8..e85827c 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -269,6 +269,14 @@ class TestAutoIdentifiers(unittest.TestCase): p4 = model.Person('fish:4') self.assertTrue(p4.id.startswith(model.factory.base_url)) + def test_other_uris(self): + p1 = model.Person(ident="tag:some-info-about-person") + self.assertEqual(p1.id, "tag:some-info-about-person") + p2 = model.Person(ident="info:ulan/500012345") + self.assertEqual(p2.id, "info:ulan/500012345") + p3 = model.Person(ident="some:random:thing:with:colons") + self.assertFalse(p3.id == "some:random:thing:with:colons") + def test_no_ident(self): model.factory.auto_assign_id = True
Support tag: URIs for internal identity management Rather than assigning UUIDs or other final URIs to instances, crom should support tag: URIs that are temporary carriers of the identity conditions, to then be substituted on the way into a more permanent infrastructure. These would need to be assigned (they can't be auto-generated) and would be similar to the urn:uuid: pattern otherwise. (/ht @kasei)
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_model.py::TestAutoIdentifiers::test_other_uris" ]
[ "tests/test_model.py::TestFactorySetup::test_base_dir", "tests/test_model.py::TestFactorySetup::test_base_url", "tests/test_model.py::TestFactorySetup::test_default_lang", "tests/test_model.py::TestFactorySetup::test_load_context", "tests/test_model.py::TestFactorySetup::test_pickle", "tests/test_model.py::TestFactorySetup::test_set_debug", "tests/test_model.py::TestFactorySetup::test_set_debug_stream", "tests/test_model.py::TestFactorySerialization::test_breadth", "tests/test_model.py::TestFactorySerialization::test_broken_unicode", "tests/test_model.py::TestFactorySerialization::test_collapse_json", "tests/test_model.py::TestFactorySerialization::test_external", "tests/test_model.py::TestFactorySerialization::test_pipe_scoped", "tests/test_model.py::TestFactorySerialization::test_recursion", "tests/test_model.py::TestFactorySerialization::test_string_list", "tests/test_model.py::TestFactorySerialization::test_toJSON", "tests/test_model.py::TestFactorySerialization::test_toJSON_full", "tests/test_model.py::TestFactorySerialization::test_toString", "tests/test_model.py::TestProcessTSV::test_process_tsv", "tests/test_model.py::TestBuildClasses::test_build_classes", "tests/test_model.py::TestBuildClass::test_build_class", "tests/test_model.py::TestAutoIdentifiers::test_bad_autoid", "tests/test_model.py::TestAutoIdentifiers::test_int", "tests/test_model.py::TestAutoIdentifiers::test_int_per_segment", "tests/test_model.py::TestAutoIdentifiers::test_int_per_type", "tests/test_model.py::TestAutoIdentifiers::test_no_ident", "tests/test_model.py::TestAutoIdentifiers::test_prefixes", "tests/test_model.py::TestAutoIdentifiers::test_uuid", "tests/test_model.py::TestBaseResource::test_allows_multiple", "tests/test_model.py::TestBaseResource::test_check_prop", "tests/test_model.py::TestBaseResource::test_dir", "tests/test_model.py::TestBaseResource::test_init", "tests/test_model.py::TestBaseResource::test_init_params", "tests/test_model.py::TestBaseResource::test_list_all_props", "tests/test_model.py::TestBaseResource::test_list_my_props", "tests/test_model.py::TestBaseResource::test_multiplicity", "tests/test_model.py::TestPropertyCache::test_cache_hierarchy", "tests/test_model.py::TestMagicMethods::test_set_magic_resource", "tests/test_model.py::TestMagicMethods::test_set_magic_resource_inverse", "tests/test_model.py::TestMagicMethods::test_validate_multiplicity", "tests/test_model.py::TestMagicMethods::test_validate_profile_off", "tests/test_model.py::TestMagicMethods::test_validation_off", "tests/test_model.py::TestMagicMethods::test_validation_unknown", "tests/test_model.py::TestMagicMethods::test_validation_wrong_type" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2019-06-19T18:40:11Z"
apache-2.0
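The `_is_uri` helper in the crom-56 patch resolves an ambiguity: a string containing a colon could be a full URI under a known scheme (`tag:`, `info:`, and so on), a CURIE using a registered prefix, or neither, in which case it becomes a local identifier under the base URL. A sketch of that precedence, using a hypothetical prefix table and base URL:

```python
URI_SCHEMES = ('urn:uuid:', 'tag:', 'data:', 'mailto:', 'info:', 'ftp:/', 'sftp:/')
PREFIXES = {'fish': 'http://example.org/fish/'}   # hypothetical CURIE prefixes
BASE_URL = 'http://example.org/'

def resolve_ident(ident):
    # 1. Known URI schemes (and http) pass through untouched.
    if ident.startswith(URI_SCHEMES) or ident.startswith('http'):
        return ident
    # 2. A registered prefix makes it a CURIE, kept as-is.
    head, _, _ = ident.partition(':')
    if head in PREFIXES and ':' in ident:
        return ident
    # 3. Anything else is treated as a local identifier under the base URL.
    return BASE_URL + ident

assert resolve_ident('tag:some-info-about-person') == 'tag:some-info-about-person'
assert resolve_ident('info:ulan/500012345') == 'info:ulan/500012345'
assert resolve_ident('some:random:thing:with:colons').startswith(BASE_URL)
```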
thelastpickle__cassandra-medusa-552
diff --git a/docs/Configuration.md b/docs/Configuration.md index 6fa77f1..2f02795 100644 --- a/docs/Configuration.md +++ b/docs/Configuration.md @@ -10,9 +10,14 @@ Modify it to match your requirements: ;config_file = <path to cassandra.yaml. Defaults to /etc/cassandra/cassandra.yaml> ;cql_username = <username> ;cql_password = <password> +; When using the following setting there must be files in: +; - `<cql_k8s_secrets_path>/username` containing username +; - `<cql_k8s_secrets_path>/password` containing password +;cql_k8s_secrets_path = <path to kubernetes secrets folder> ;nodetool_username = <my nodetool username> ;nodetool_password = <my nodetool password> ;nodetool_password_file_path = <path to nodetool password file> +;nodetool_k8s_secrets_path = <path to nodetool kubernetes secrets folder> ;nodetool_host = <host name or IP to use for nodetool> ;nodetool_port = <port number to use for nodetool> ;certfile= <Client SSL: path to rootCa certificate> @@ -153,12 +158,14 @@ backup_grace_period_in_days = 10 Some config settings can be overriden through environment variables prefixed with `MEDUSA_`: -| Setting | Env Variable | -|------------------------|-------------------------------| -| `cql_username` | `MEDUSA_CQL_USERNAME` | -| `cql_password` | `MEDUSA_CQL_PASSWORD` | -| `nodetool_username` | `MEDUSA_NODETOOL_USERNAME` | -| `nodetool_password` | `MEDUSA_NODETOOL_PASSWORD` | -| `sstableloader_tspw` | `MEDUSA_SSTABLELOADER_TSPW` | -| `sstableloader_kspw` | `MEDUSA_SSTABLELOADER_KSPW` | -| `resolve_ip_addresses` | `MEDUSA_RESOLVE_IP_ADDRESSES` | +| Setting | Env Variable | +|-----------------------------|------------------------------------| +| `cql_username` | `MEDUSA_CQL_USERNAME` | +| `cql_password` | `MEDUSA_CQL_PASSWORD` | +| `cql_k8s_secrets_path` | `MEDUSA_CQL_K8S_SECRETS_PATH` | +| `nodetool_username` | `MEDUSA_NODETOOL_USERNAME` | +| `nodetool_password` | `MEDUSA_NODETOOL_PASSWORD` | +| `nodetool_k8s_secrets_path` | `MEDUSA_NODETOOL_K8S_SECRETS_PATH` | +| `sstableloader_tspw` | `MEDUSA_SSTABLELOADER_TSPW` | +| `sstableloader_kspw` | `MEDUSA_SSTABLELOADER_KSPW` | +| `resolve_ip_addresses` | `MEDUSA_RESOLVE_IP_ADDRESSES` | diff --git a/medusa-example.ini b/medusa-example.ini index 715d825..e7a4e2d 100644 --- a/medusa-example.ini +++ b/medusa-example.ini @@ -18,9 +18,14 @@ ;config_file = <path to cassandra.yaml. 
Defaults to /etc/cassandra/cassandra.yaml> ;cql_username = <username> ;cql_password = <password> +; When using the following setting there must be files in: +; - `<cql_k8s_secrets_path>/username` containing username +; - `<cql_k8s_secrets_path>/password` containing password +;cql_k8s_secrets_path = <path to kubernetes secrets folder> ;nodetool_username = <my nodetool username> ;nodetool_password = <my nodetool password> ;nodetool_password_file_path = <path to nodetool password file> +;nodetool_k8s_secrets_path = <path to nodetool kubernetes secrets folder> ;nodetool_host = <host name or IP to use for nodetool> ;nodetool_port = <port number to use for nodetool> ;certfile= <Client SSL: path to rootCa certificate> diff --git a/medusa/config.py b/medusa/config.py index 0ecdfd3..e95ac0a 100644 --- a/medusa/config.py +++ b/medusa/config.py @@ -39,7 +39,8 @@ CassandraConfig = collections.namedtuple( ['start_cmd', 'stop_cmd', 'config_file', 'cql_username', 'cql_password', 'check_running', 'is_ccm', 'sstableloader_bin', 'nodetool_username', 'nodetool_password', 'nodetool_password_file_path', 'nodetool_host', 'nodetool_port', 'certfile', 'usercert', 'userkey', 'sstableloader_ts', 'sstableloader_tspw', - 'sstableloader_ks', 'sstableloader_kspw', 'nodetool_ssl', 'resolve_ip_addresses', 'use_sudo', 'nodetool_flags'] + 'sstableloader_ks', 'sstableloader_kspw', 'nodetool_ssl', 'resolve_ip_addresses', 'use_sudo', 'nodetool_flags', + 'cql_k8s_secrets_path', 'nodetool_k8s_secrets_path'] ) SSHConfig = collections.namedtuple( @@ -229,12 +230,30 @@ def parse_config(args, config_file): 'nodetool_password', 'sstableloader_tspw', 'sstableloader_kspw', - 'resolve_ip_addresses' + 'resolve_ip_addresses', + 'cql_k8s_secrets_path', + 'nodetool_k8s_secrets_path' ]: config_property_upper = "MEDUSA_{}".format(config_property.upper()) if config_property_upper in os.environ: config.set('cassandra', config_property, os.environ[config_property_upper]) + if config.has_option('cassandra', 'cql_k8s_secrets_path'): + cql_k8s_secrets_path = config.get('cassandra', 'cql_k8s_secrets_path') + if cql_k8s_secrets_path: + logging.debug('Using cql_k8s_secrets_path (path="{}")'.format(cql_k8s_secrets_path)) + cql_k8s_username, cql_k8s_password = _load_k8s_secrets(cql_k8s_secrets_path) + config.set('cassandra', 'cql_username', cql_k8s_username) + config.set('cassandra', 'cql_password', cql_k8s_password) + + if config.has_option('cassandra', 'nodetool_k8s_secrets_path'): + nodetool_k8s_secrets_path = config.get('cassandra', 'nodetool_k8s_secrets_path') + if nodetool_k8s_secrets_path: + logging.debug('Using nodetool_k8s_secrets_path (path="{}")'.format(nodetool_k8s_secrets_path)) + nodetool_k8s_username, nodetool_k8s_password = _load_k8s_secrets(nodetool_k8s_secrets_path) + config.set('cassandra', 'nodetool_username', nodetool_k8s_username) + config.set('cassandra', 'nodetool_password', nodetool_k8s_password) + resolve_ip_addresses = config['cassandra']['resolve_ip_addresses'] hostname_resolver = HostnameResolver(resolve_ip_addresses, kubernetes_enabled) if config['storage']['fqdn'] == socket.getfqdn() and not resolve_ip_addresses: @@ -249,6 +268,27 @@ def parse_config(args, config_file): return config +def _load_k8s_secrets(k8s_secrets_path): + """Load username and password from files following the k8s secrets convention. 
+ + :param str k8s_secrets_path: folder path containing the secrets + :return str, str: username and password contained in files + """ + # By default, username and password are available in path/username and path/password. + # They could be in other places if overridden, this is not supported for now. Refs: + # https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod + # https://kubernetes.io/docs/concepts/configuration/secret/#consuming-secret-values-from-volumes + k8s_username_file = os.path.join(k8s_secrets_path, 'username') + logging.debug('Loading k8s username from "{}"'.format(k8s_username_file)) + with open(k8s_username_file, 'r') as f: + k8s_username = f.read().strip() + k8s_password_file = os.path.join(k8s_secrets_path, 'password') + logging.debug('Loading k8s password from "{}"'.format(k8s_password_file)) + with open(k8s_password_file, 'r') as f: + k8s_password = f.read().strip() + return k8s_username, k8s_password + + def load_config(args, config_file): """Load configuration from a medusa.ini file
thelastpickle/cassandra-medusa
d73d5b03f558a9ac5d08f18d52c276a685975d1d
diff --git a/tests/config_test.py b/tests/config_test.py index 56acc04..3c4ead6 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -18,6 +18,7 @@ import pathlib import unittest from unittest.mock import patch import socket +import tempfile import medusa.config import medusa.utils @@ -98,6 +99,44 @@ class ConfigTest(unittest.TestCase): assert config.cassandra.cql_username == 'new_cql_username' assert config.cassandra.cql_password == 'new_cql_password' + def test_cql_k8s_secrets_path_override(self): + """ + Ensure that CQL credentials stored in a path following k8s convention override the default vars. + """ + tmpdir = tempfile.mkdtemp() + os.environ['MEDUSA_CQL_K8S_SECRETS_PATH'] = tmpdir + # Write k8s_username and k8s_password in /tmpdir/username and /tmpdir/password + for k8s_cred in ['username', 'password']: + with open(os.path.join(tmpdir, k8s_cred), 'w') as f: + f.write('k8s_{}'.format(k8s_cred)) + + args = {} + config = medusa.config.load_config(args, self.medusa_config_file) + assert config.cassandra.cql_username == 'k8s_username' + assert config.cassandra.cql_password == 'k8s_password' + + # Cleanup + os.environ.pop('MEDUSA_CQL_K8S_SECRETS_PATH', None) + + def test_nodetool_k8s_secrets_path_override(self): + """ + Ensure that nodetool credentials stored in a path following k8s convention override the default vars. + """ + tmpdir = tempfile.mkdtemp() + os.environ['MEDUSA_NODETOOL_K8S_SECRETS_PATH'] = tmpdir + # Write nodetool_username and nodetool_password in /tmpdir/username and /tmpdir/password + for k8s_cred in ['username', 'password']: + with open(os.path.join(tmpdir, k8s_cred), 'w') as f: + f.write('k8s_{}'.format(k8s_cred)) + + args = {} + config = medusa.config.load_config(args, self.medusa_config_file) + assert config.cassandra.nodetool_username == 'k8s_username' + assert config.cassandra.nodetool_password == 'k8s_password' + + # Cleanup + os.environ.pop('MEDUSA_NODETOOL_K8S_SECRETS_PATH', None) + def test_args_settings_override(self): """Ensure that each config file's section settings can be overridden with command line options""" args = {
Allow Medusa to take CQL credentials through a file The K8ssandra-operator modular secrets backend requires the ability for Medusa to read CQL credentials from files. Currently only environment variables are supported.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/config_test.py::ConfigTest::test_cql_k8s_secrets_path_override", "tests/config_test.py::ConfigTest::test_nodetool_k8s_secrets_path_override" ]
[ "tests/config_test.py::ConfigTest::test_args_settings_override", "tests/config_test.py::ConfigTest::test_different_auth_env_variables", "tests/config_test.py::ConfigTest::test_fqdn_with_resolve_ip_addresses_disabled", "tests/config_test.py::ConfigTest::test_fqdn_with_resolve_ip_addresses_enabled", "tests/config_test.py::ConfigTest::test_new_env_variables_override_deprecated_ones", "tests/config_test.py::ConfigTest::test_no_auth_env_variables", "tests/config_test.py::ConfigTest::test_overridden_fqdn", "tests/config_test.py::ConfigTest::test_use_sudo_default", "tests/config_test.py::ConfigTest::test_use_sudo_kubernetes_disabled", "tests/config_test.py::ConfigTest::test_use_sudo_kubernetes_enabled" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-01-04T15:35:43Z"
apache-2.0
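The convention behind `cql_k8s_secrets_path` in the record above is that Kubernetes mounts each key of a Secret as a separate file inside the mounted directory, so the credentials land in `<path>/username` and `<path>/password`. A sketch of a loader under that assumption (the function name and example path are illustrative):

```python
import os

def load_k8s_secrets(secrets_path):
    # Kubernetes mounts each Secret key as a file; strip the trailing newline.
    creds = {}
    for key in ('username', 'password'):
        with open(os.path.join(secrets_path, key), 'r') as f:
            creds[key] = f.read().strip()
    return creds['username'], creds['password']

# Usage (path is hypothetical):
# cql_username, cql_password = load_k8s_secrets('/etc/medusa-secrets/cql')
```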
thelastpickle__cassandra-medusa-592
diff --git a/medusa/service/grpc/restore.py b/medusa/service/grpc/restore.py index 0d5ccf6..ea6e79e 100644 --- a/medusa/service/grpc/restore.py +++ b/medusa/service/grpc/restore.py @@ -23,7 +23,6 @@ from pathlib import Path import medusa.config import medusa.restore_node import medusa.listing -from medusa.service.grpc.server import RESTORE_MAPPING_LOCATION def create_config(config_file_path): @@ -49,43 +48,34 @@ def configure_console_logging(config): logging.getLogger(logger_name).setLevel(logging.WARN) -if __name__ == '__main__': - if len(sys.argv) > 3: - config_file_path = sys.argv[2] - restore_key = sys.argv[3] - else: - logging.error("Usage: {} <config_file_path> <restore_key>".format(sys.argv[0])) - sys.exit(1) - +def apply_mapping_env(): + # By default we consider that we're restoring in place. in_place = True - if os.path.exists(f"{RESTORE_MAPPING_LOCATION}/{restore_key}"): - logging.info(f"Reading mapping file {RESTORE_MAPPING_LOCATION}/{restore_key}") - with open(f"{RESTORE_MAPPING_LOCATION}/{restore_key}", 'r') as f: - mapping = json.load(f) - # Mapping json structure will look like: - # {'in_place': true, - # 'host_map': - # {'172.24.0.3': {'source': ['172.24.0.3'], 'seed': False}, - # '127.0.0.1': {'source': ['172.24.0.4'], 'seed': False}, - # '172.24.0.6': {'source': ['172.24.0.6'], 'seed': False}}} - # As each mapping is specific to a Cassandra node, we're looking for the node that maps to 127.0.0.1, - # which will be different for each pod. - # If hostname resolving is turned on, we're looking for the localhost key instead. + if "RESTORE_MAPPING" in os.environ.keys(): + logging.info("Reading restore mapping from environment variable") + mapping = json.loads(os.environ["RESTORE_MAPPING"]) + # Mapping json structure will look like: + # {'in_place': true, + # 'host_map': + # {'test-dc1-sts-0': {'source': ['172.24.0.3'], 'seed': False}, + # 'test-dc1-sts-1': {'source': ['172.24.0.4'], 'seed': False}, + # 'test-dc1-sts-2': {'source': ['172.24.0.6'], 'seed': False}}} + # As each mapping is specific to a Cassandra node, we're looking for + # the node that maps to the value of the POD_NAME var. + in_place = mapping["in_place"] + if not in_place: print(f"Mapping: {mapping}") - if "localhost" in mapping["host_map"].keys(): - os.environ["POD_IP"] = mapping["host_map"]["localhost"]["source"][0] - elif "127.0.0.1" in mapping["host_map"].keys(): - os.environ["POD_IP"] = mapping["host_map"]["127.0.0.1"]["source"][0] - elif "::1" in mapping["host_map"].keys(): - os.environ["POD_IP"] = mapping["host_map"]["::1"]["source"][0] - in_place = mapping["in_place"] - if not in_place and "POD_IP" not in os.environ.keys(): - print("Could not find target node mapping for this pod while performing remote restore. Exiting.") - sys.exit(1) + # While POD_IP isn't a great name, it's the env variable that is used to enforce the fqdn of the node. + # This allows us to specify which node we're restoring from. 
+ if os.environ["POD_NAME"] in mapping["host_map"].keys(): + os.environ["POD_IP"] = mapping["host_map"][os.environ["POD_NAME"]]["source"][0] + print(f"Restoring from {os.environ['POD_IP']}") + else: + return False, f"POD_NAME {os.environ['POD_NAME']} not found in mapping" + return in_place, None - config = create_config(config_file_path) - configure_console_logging(config.logging) +def restore_backup(in_place, config): backup_name = os.environ["BACKUP_NAME"] tmp_dir = Path("/tmp") if "MEDUSA_TMP_DIR" not in os.environ else Path(os.environ["MEDUSA_TMP_DIR"]) print(f"Downloading backup {backup_name} to {tmp_dir}") @@ -98,17 +88,33 @@ if __name__ == '__main__': cluster_backups = list(medusa.listing.get_backups(config, True)) logging.info(f"Found {len(cluster_backups)} backups in the cluster") - backup_found = False # Checking if the backup exists for the node we're restoring. # Skipping restore if it doesn't exist. for cluster_backup in cluster_backups: if cluster_backup.name == backup_name: - backup_found = True logging.info("Starting restore of backup {}".format(backup_name)) medusa.restore_node.restore_node(config, tmp_dir, backup_name, in_place, keep_auth, seeds, verify, keyspaces, tables, use_sstableloader) - logging.info("Finished restore of backup {}".format(backup_name)) - break + return f"Finished restore of backup {backup_name}" + + return f"Skipped restore of missing backup {backup_name}" + + +if __name__ == '__main__': + if len(sys.argv) > 3: + config_file_path = sys.argv[2] + restore_key = sys.argv[3] + else: + logging.error("Usage: {} <config_file_path> <restore_key>".format(sys.argv[0])) + sys.exit(1) + + (in_place, error_message) = apply_mapping_env() + if error_message: + print(error_message) + sys.exit(1) + + config = create_config(config_file_path) + configure_console_logging(config.logging) - if not backup_found: - logging.info("Skipped restore of missing backup {}".format(backup_name)) + output_message = restore_backup(in_place, config) + logging.info(output_message) diff --git a/medusa/service/grpc/server.py b/medusa/service/grpc/server.py index 119a31e..9a456a9 100644 --- a/medusa/service/grpc/server.py +++ b/medusa/service/grpc/server.py @@ -43,6 +43,7 @@ TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S' BACKUP_MODE_DIFFERENTIAL = "differential" BACKUP_MODE_FULL = "full" RESTORE_MAPPING_LOCATION = "/var/lib/cassandra/.restore_mapping" +RESTORE_MAPPING_ENV = "RESTORE_MAPPING" class Server:
thelastpickle/cassandra-medusa
ec425ec37f42cb5434644e9b84a353b5f49f3842
diff --git a/tests/service/grpc/restore_test.py b/tests/service/grpc/restore_test.py new file mode 100644 index 0000000..831dcea --- /dev/null +++ b/tests/service/grpc/restore_test.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import unittest +import os +from unittest.mock import MagicMock, patch +from pathlib import PosixPath + +from medusa.service.grpc.restore import apply_mapping_env, restore_backup + + +class ServiceRestoreTest(unittest.TestCase): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def setUp(self): + os.environ.pop('POD_IP', None) + os.environ.pop('POD_NAME', None) + os.environ.pop('RESTORE_MAPPING', None) + + def test_restore_inplace(self): + os.environ['POD_NAME'] = 'test-dc1-sts-0' + os.environ['RESTORE_MAPPING'] = '{"in_place": true, "host_map": {' \ + + '"test-dc1-sts-0": {"source": ["test-dc1-sts-0"], "seed": false},' \ + + '"test-dc1-sts-1": {"source": ["test-dc1-sts-1"], "seed": false},' \ + + '"test-dc1-sts-2": {"source": "prod-dc1-sts-2", "seed": false}}}' + (in_place, error_message) = apply_mapping_env() + + assert in_place is True + assert error_message is None + assert "POD_IP" not in os.environ.keys() + + def test_restore_remote(self): + os.environ.update({'POD_NAME': 'test-dc1-sts-0'}) + os.environ['RESTORE_MAPPING'] = '{"in_place": false, "host_map": {' \ + + '"test-dc1-sts-0": {"source": ["prod-dc1-sts-3"], "seed": false},' \ + + '"test-dc1-sts-1": {"source": ["prod-dc1-sts-1"], "seed": false},' \ + + '"test-dc1-sts-2": {"source": "prod-dc1-sts-2", "seed": false}}}' + (in_place, error_message) = apply_mapping_env() + + assert in_place is False + assert error_message is None + assert "POD_IP" in os.environ.keys() + assert os.environ['POD_IP'] == 'prod-dc1-sts-3' + + def test_restore_no_match(self): + os.environ['POD_NAME'] = 'test-dc1-sts-0' + os.environ['RESTORE_MAPPING'] = '{"in_place": false, "host_map": {' \ + + '"test-dc1-sts-3": {"source": ["prod-dc1-sts-3"], "seed": false},' \ + + '"test-dc1-sts-1": {"source": ["prod-dc1-sts-1"], "seed": false},' \ + + '"test-dc1-sts-2": {"source": "prod-dc1-sts-2", "seed": false}}}' + (in_place, error_message) = apply_mapping_env() + + assert in_place is False + assert error_message is not None + assert "POD_IP" not in os.environ.keys() + + def test_success_restore_backup(self): + # Define test inputs + in_place = True + config = {'some': 'config'} + + # Define expected output + expected_output = 'Finished restore of backup test_backup' + + # Set up mock environment variables + os.environ["BACKUP_NAME"] = "test_backup" + os.environ["MEDUSA_TMP_DIR"] = "/tmp" + + # Set up mock for medusa.listing.get_backups() + with patch('medusa.listing.get_backups') as mock_get_backups: + mock_cluster_backup = MagicMock() + mock_cluster_backup.name = "test_backup" + mock_get_backups.return_value = [mock_cluster_backup] + + # Set up mock for medusa.restore_node.restore_node() + with patch('medusa.restore_node.restore_node') as mock_restore_node: + 
mock_restore_node.return_value = None + + # Call the function + result = restore_backup(in_place, config) + + # Assertions + assert result == expected_output + mock_get_backups.assert_called_once_with(config, True) + mock_restore_node.assert_called_once_with(config, PosixPath('/tmp'), + 'test_backup', True, False, None, False, {}, {}, False) + + def test_fail_restore_backup(self): + # Define test inputs + in_place = True + config = {'some': 'config'} + + # Define expected output + expected_output = 'Skipped restore of missing backup test_backup' + + # Set up mock environment variables + os.environ["BACKUP_NAME"] = "test_backup" + os.environ["MEDUSA_TMP_DIR"] = "/tmp" + + # Set up mock for medusa.listing.get_backups() + with patch('medusa.listing.get_backups') as mock_get_backups: + mock_cluster_backup = MagicMock() + mock_cluster_backup.name = "test_backup10" + mock_get_backups.return_value = [mock_cluster_backup] + + # Set up mock for medusa.restore_node.restore_node() + with patch('medusa.restore_node.restore_node') as mock_restore_node: + mock_restore_node.return_value = None + + result = restore_backup(in_place, config) + + assert result == expected_output + mock_get_backups.assert_called_once_with(config, True) + mock_restore_node.assert_not_called() + + +if __name__ == '__main__': + unittest.main()
Modify k8s restores to use an env variable to store the restore mapping Currently in `restore.py`, we read the restore mapping from a file using a fairly brittle process. We need to evolve this into reading the mapping from an env variable, using a mapping generated by k8ssandra-operator instead of medusa itself. We'll rely on the `POD_NAME` env variable to identify the current node and find the source node for the restore, which will be used to set the `POD_IP` env variable and enforce the fqdn for the restore operation.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/service/grpc/restore_test.py::ServiceRestoreTest::test_fail_restore_backup", "tests/service/grpc/restore_test.py::ServiceRestoreTest::test_restore_inplace", "tests/service/grpc/restore_test.py::ServiceRestoreTest::test_restore_no_match", "tests/service/grpc/restore_test.py::ServiceRestoreTest::test_restore_remote", "tests/service/grpc/restore_test.py::ServiceRestoreTest::test_success_restore_backup" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-05-30T18:29:01Z"
apache-2.0
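The mapping consumed by the medusa-592 restore code is a JSON document keyed by pod name and delivered through the `RESTORE_MAPPING` environment variable; the pod looks itself up via `POD_NAME` and, for a remote restore, writes the source node into `POD_IP`. A sketch of that lookup with hypothetical pod and source names:

```python
import json
import os

# Hypothetical values; in a real pod these are injected by the operator.
os.environ['POD_NAME'] = 'test-dc1-sts-0'
os.environ['RESTORE_MAPPING'] = json.dumps({
    'in_place': False,
    'host_map': {'test-dc1-sts-0': {'source': ['prod-dc1-sts-3'], 'seed': False}},
})

mapping = json.loads(os.environ['RESTORE_MAPPING'])
if not mapping['in_place']:
    entry = mapping['host_map'].get(os.environ['POD_NAME'])
    if entry is None:
        raise SystemExit('no mapping found for this pod')
    # POD_IP is (re)used to force the fqdn of the node we restore from.
    os.environ['POD_IP'] = entry['source'][0]

assert os.environ['POD_IP'] == 'prod-dc1-sts-3'
```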
thelastpickle__cassandra-medusa-597
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7ef0044..698d56b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -107,7 +107,7 @@ jobs: matrix: #python-version: [3.6] python-version: [3.6, "3.10"] - it-backend: [local, s3, gcs, minio, azure] + it-backend: [local, s3, gcs, minio, azure, azure-hierarchical] # IBM not included by default due to lite plan quota being easily exceeded #it-backend: [local, s3, gcs, minio, ibm, azure] cassandra-version: [2.2.19, 3.11.11, 4.0.0, 'github:apache/trunk'] @@ -146,7 +146,15 @@ jobs: cassandra-version: 'github:apache/trunk' - it-backend: azure python-version: "3.10" - + - it-backend: azure-hierarchical + cassandra-version: 2.2.19 + - it-backend: azure-hierarchical + cassandra-version: 3.11.11 + - it-backend: azure-hierarchical + cassandra-version: 'github:apache/trunk' + - it-backend: azure-hierarchical + python-version: "3.10" + runs-on: ubuntu-20.04 services: minio: @@ -180,7 +188,7 @@ jobs: pip install -r requirements-test.txt pip install ccm case '${{ matrix.it-backend }}' in - 'azure') + 'azure'|'azure-hierarchical') pip install -r requirements-azure.txt ;; 'ibm'|'minio'|'s3') @@ -198,7 +206,8 @@ jobs: || ( "${{ matrix.it-backend }}" == "minio" ) \ || ( -n '${{ secrets.MEDUSA_GCS_CREDENTIALS }}' && "${{ matrix.it-backend }}" == "gcs" ) \ || ( -n '${{ secrets.MEDUSA_IBM_CREDENTIALS }}' && "${{ matrix.it-backend }}" == "ibm" ) \ - || ( -n '${{ secrets.MEDUSA_AZURE_CREDENTIALS }}' && "${{ matrix.it-backend }}" == "azure" ) ]]; + || ( -n '${{ secrets.MEDUSA_AZURE_CREDENTIALS }}' && "${{ matrix.it-backend }}" == "azure" ) \ + || ( -n '${{ secrets.MEDUSA_AZURE_HIERARCHICAL_CREDENTIALS }}' && "${{ matrix.it-backend }}" == "azure-hierarchical" ) ]]; then echo "IT_CAN_RUN=yes" >> $GITHUB_ENV else @@ -263,6 +272,11 @@ jobs: # Azure Blob Storage tests printf "%s" '${{ secrets.MEDUSA_AZURE_CREDENTIALS }}' > ~/medusa_azure_credentials.json ./run_integration_tests.sh -v --azure --no-local --cassandra-version=${{ matrix.cassandra-version }} + elif [ "${{ matrix.it-backend }}" == "azure-hierarchical" ] + then + # Azure Blob Storage with hierarchical namespace tests + printf "%s" '${{ secrets.MEDUSA_AZURE_HIERARCHICAL_CREDENTIALS }}' > ~/medusa_azure_credentials.json + ./run_integration_tests.sh -v --azure --no-local --cassandra-version=${{ matrix.cassandra-version }} else # Local storage tests ./run_integration_tests.sh -v --cassandra-version=${{ matrix.cassandra-version }} diff --git a/medusa/storage/__init__.py b/medusa/storage/__init__.py index 9485aa2..30173d4 100644 --- a/medusa/storage/__init__.py +++ b/medusa/storage/__init__.py @@ -281,7 +281,8 @@ class Storage(object): def group_backup_index_by_backup_and_node(self, backup_index_blobs): def get_backup_name(blob): - return blob.name.split('/')[2] if len(str(self.prefix_path)) <= 1 else blob.name.split('/')[3] + blob_name_chunks = blob.name.split('/') + return blob_name_chunks[2] if len(str(self.prefix_path)) <= 1 else blob_name_chunks[3] def name_and_fqdn(blob): return get_backup_name(blob), Storage.get_fqdn_from_any_index_blob(blob) @@ -292,9 +293,20 @@ class Storage(object): def group_by_fqdn(blobs): return itertools.groupby(blobs, Storage.get_fqdn_from_any_index_blob) + def has_proper_name(blob): + blob_name_chunks = blob.name.split('/') + is_proper = len(blob_name_chunks) == 4 if len(str(self.prefix_path)) <= 1 else len(blob_name_chunks) == 5 + if not is_proper: + logging.warning('File {} in backup index has improper name'.format(blob.name)) + return 
is_proper + blobs_by_backup = {} + properly_named_index_blobs = filter( + has_proper_name, + backup_index_blobs + ) sorted_backup_index_blobs = sorted( - backup_index_blobs, + properly_named_index_blobs, key=name_and_fqdn ) diff --git a/medusa/storage/abstract_storage.py b/medusa/storage/abstract_storage.py index 61c5436..bd887c0 100644 --- a/medusa/storage/abstract_storage.py +++ b/medusa/storage/abstract_storage.py @@ -54,6 +54,8 @@ class AbstractStorage(abc.ABC): else: objects = self.driver.list_container_objects(self.bucket, ex_prefix=str(path)) + objects = list(filter(lambda blob: blob.size > 0, objects)) + return objects @retry(stop_max_attempt_number=7, wait_exponential_multiplier=10000, wait_exponential_max=120000) diff --git a/medusa/storage/local_storage.py b/medusa/storage/local_storage.py index 0759d27..57da294 100644 --- a/medusa/storage/local_storage.py +++ b/medusa/storage/local_storage.py @@ -41,6 +41,8 @@ class LocalStorage(AbstractStorage): if path is not None: objects = list(filter(lambda blob: blob.name.startswith(path), objects)) + objects = list(filter(lambda blob: blob.size > 0, objects)) + return objects def get_object_datetime(self, blob):
thelastpickle/cassandra-medusa
9d1060268f9512a5a6215f4ca98bdc44cc456b42
diff --git a/tests/storage_test.py b/tests/storage_test.py index c7c0ded..94307d8 100644 --- a/tests/storage_test.py +++ b/tests/storage_test.py @@ -92,8 +92,10 @@ class RestoreNodeTest(unittest.TestCase): def test_list_objects(self): file1_content = "content of the test file1" file2_content = "content of the test file2" + file3_content = "" self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content) self.storage.storage_driver.upload_blob_from_string("test_download_blobs2/file2.txt", file2_content) + self.storage.storage_driver.upload_blob_from_string("test_download_blobs3/file3.txt", file3_content) objects = self.storage.storage_driver.list_objects() self.assertEqual(len(objects), 2) one_object = self.storage.storage_driver.list_objects("test_download_blobs2") @@ -284,6 +286,34 @@ class RestoreNodeTest(unittest.TestCase): self.assertTrue("node1" in blobs_by_backup["backup2"]) self.assertFalse("node2" in blobs_by_backup["backup2"]) + def test_parse_backup_index_with_wrong_names(self): + file_content = "content of the test file" + prefix_path = self.storage.prefix_path + + # Index files for a backup + self.storage.storage_driver.upload_blob_from_string( + "{}index/backup_index/backup3/tokenmap_node1.json".format(prefix_path), file_content) + self.storage.storage_driver.upload_blob_from_string( + "{}index/backup_index/backup3/schema_node1.cql".format(prefix_path), file_content) + self.storage.storage_driver.upload_blob_from_string( + "{}index/backup_index/backup3/started_node1_1689598370.timestamp".format(prefix_path), file_content) + self.storage.storage_driver.upload_blob_from_string( + "{}index/backup_index/backup3/finished_node1_1689598370.timestamp".format(prefix_path), file_content) + # Files that we want to see filtered out + self.storage.storage_driver.upload_blob_from_string( + "{}index/backup_index/extra_folder/backup3/tokenmap_node2.json".format(prefix_path), file_content) + self.storage.storage_driver.upload_blob_from_string( + "{}index/missing_folder/tokenmap_node2.json".format(prefix_path), file_content) + self.storage.storage_driver.upload_blob_from_string( + "{}index/backup_index/missing_file".format(prefix_path), file_content) + + path = '{}index/backup_index'.format(prefix_path) + backup_index = self.storage.storage_driver.list_objects(path) + blobs_by_backup = self.storage.group_backup_index_by_backup_and_node(backup_index) + self.assertEqual(1, len(blobs_by_backup.keys())) + self.assertEqual(1, len(blobs_by_backup['backup3'].keys())) + self.assertEqual(4, len(blobs_by_backup['backup3']['node1'])) + def test_remove_extension(self): self.assertEqual( 'localhost', diff --git a/tests/storage_test_with_prefix.py b/tests/storage_test_with_prefix.py index 5bf5f73..618548a 100644 --- a/tests/storage_test_with_prefix.py +++ b/tests/storage_test_with_prefix.py @@ -93,8 +93,10 @@ class RestoreNodeTest(unittest.TestCase): def test_list_objects(self): file1_content = "content of the test file1" file2_content = "content of the test file2" + file3_content = "" self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content) self.storage.storage_driver.upload_blob_from_string("test_download_blobs2/file2.txt", file2_content) + self.storage.storage_driver.upload_blob_from_string("test_download_blobs3/file3.txt", file3_content) objects = self.storage.storage_driver.list_objects() self.assertEqual(len(objects), 2) one_object = self.storage.storage_driver.list_objects("test_download_blobs2") @@ -286,6 +288,34 @@ 
class RestoreNodeTest(unittest.TestCase): self.assertTrue("node1" in blobs_by_backup["backup2"]) self.assertFalse("node2" in blobs_by_backup["backup2"]) + def test_parse_backup_index_with_wrong_names(self): + file_content = "content of the test file" + prefix_path = self.storage.prefix_path + + # Index files for a backup + self.storage.storage_driver.upload_blob_from_string( + "{}index/backup_index/backup3/tokenmap_node1.json".format(prefix_path), file_content) + self.storage.storage_driver.upload_blob_from_string( + "{}index/backup_index/backup3/schema_node1.cql".format(prefix_path), file_content) + self.storage.storage_driver.upload_blob_from_string( + "{}index/backup_index/backup3/started_node1_1689598370.timestamp".format(prefix_path), file_content) + self.storage.storage_driver.upload_blob_from_string( + "{}index/backup_index/backup3/finished_node1_1689598370.timestamp".format(prefix_path), file_content) + # Files that we want to see filtered out + self.storage.storage_driver.upload_blob_from_string( + "{}index/backup_index/extra_folder/backup3/tokenmap_node2.json".format(prefix_path), file_content) + self.storage.storage_driver.upload_blob_from_string( + "{}index/missing_folder/tokenmap_node2.json".format(prefix_path), file_content) + self.storage.storage_driver.upload_blob_from_string( + "{}index/backup_index/missing_file".format(prefix_path), file_content) + + path = '{}index/backup_index'.format(prefix_path) + backup_index = self.storage.storage_driver.list_objects(path) + blobs_by_backup = self.storage.group_backup_index_by_backup_and_node(backup_index) + self.assertEqual(1, len(blobs_by_backup.keys())) + self.assertEqual(1, len(blobs_by_backup['backup3'].keys())) + self.assertEqual(4, len(blobs_by_backup['backup3']['node1'])) + def test_remove_extension(self): self.assertEqual( 'localhost',
Listing backups fails on some installations against Azure [Project board link](https://github.com/orgs/k8ssandra/projects/8/views/1?pane=issue&itemId=32514135) While we couldn't reproduce the issue, we're seeing installations where backups are taken correctly but listing them fails with the following error: ``` Traceback (most recent call last): File "/usr/local/bin/medusa", line 8, in <module> sys.exit(cli()) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 1128, in __call__ return self.main(*args, **kwargs) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 1053, in main rv = self.invoke(ctx) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 1659, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 1395, in invoke return ctx.invoke(self.callback, **ctx.params) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 754, in invoke return __callback(*args, **kwargs) File "/usr/local/lib/python3.6/site-packages/click/decorators.py", line 84, in new_func return ctx.invoke(f, obj, *args, **kwargs) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 754, in invoke return __callback(*args, **kwargs) File "/usr/local/lib/python3.6/site-packages/medusa/medusacli.py", line 185, in list_backups medusa.listing.list_backups(medusaconfig, show_all) File "/usr/local/lib/python3.6/site-packages/medusa/listing.py", line 41, in list_backups cluster_backups = get_backups(config, show_all) File "/usr/local/lib/python3.6/site-packages/medusa/listing.py", line 29, in get_backups key=lambda b: b.started File "/usr/local/lib/python3.6/site-packages/medusa/storage/__init__.py", line 355, in list_cluster_backups key=lambda b: (b.name, b.started) File "/usr/local/lib/python3.6/site-packages/medusa/storage/__init__.py", line 190, in list_node_backups blobs_by_backup = self.group_backup_index_by_backup_and_node(backup_index_blobs) File "/usr/local/lib/python3.6/site-packages/medusa/storage/__init__.py", line 299, in group_backup_index_by_backup_and_node key=name_and_fqdn File "/usr/local/lib/python3.6/site-packages/medusa/storage/__init__.py", line 288, in name_and_fqdn return get_backup_name(blob), Storage.get_fqdn_from_any_index_blob(blob) File "/usr/local/lib/python3.6/site-packages/medusa/storage/__init__.py", line 285, in get_backup_name return blob.name.split('/')[2] if len(str(self.prefix_path)) <= 1 else blob.name.split('/')[3] IndexError: list index out of range ``` After adding some debugging outputs, we see that the folders are returned when listing backups, giving a hierarchical view of the blobs instead of a flat one: ``` [2023-06-29 20:29:25,480] DEBUG: Loading storage_provider: azure_blobs [2023-06-29 20:29:26,966] DEBUG: [Storage] Listing objects in index/backup_index [2023-06-29 20:29:26,991] DEBUG: Found backup index blob: index/backup_index [2023-06-29 20:29:26,991] DEBUG: Found backup index blob: index/backup_index/cassandra_backup_20230628 [2023-06-29 20:29:26,991] DEBUG: Found backup index blob: index/backup_index/cassandra_backup_20230628/differential.... ... ``` This is unexpected and not dealt with correctly in the code. We either need to filter out the folders when listing blobs, which should be made possible thanks to their 0 size. We could also make the `get_backup_name()` method more resilient to such issues and do a precheck before trying to access the split array indices. 
If we don't have the index we're looking for, we can issue a warning and skip the file without erroring. ## Definition of Done - [ ] when listing blobs in the list-backups command, empty files are filtered out - [ ] `get_backup_name()` will detect blobs that aren't named as expected and ignore them
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/storage_test.py::RestoreNodeTest::test_list_objects", "tests/storage_test.py::RestoreNodeTest::test_parse_backup_index_with_wrong_names", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_list_objects", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_parse_backup_index_with_wrong_names" ]
[ "tests/storage_test.py::RestoreNodeTest::test_add_object_from_string", "tests/storage_test.py::RestoreNodeTest::test_download_blobs", "tests/storage_test.py::RestoreNodeTest::test_generate_md5_hash", "tests/storage_test.py::RestoreNodeTest::test_get_blob", "tests/storage_test.py::RestoreNodeTest::test_get_fqdn_from_any_index_blob", "tests/storage_test.py::RestoreNodeTest::test_get_fqdn_from_backup_index_blob", "tests/storage_test.py::RestoreNodeTest::test_get_object_datetime", "tests/storage_test.py::RestoreNodeTest::test_get_timestamp_from_blob_name", "tests/storage_test.py::RestoreNodeTest::test_hashes_match", "tests/storage_test.py::RestoreNodeTest::test_parse_backup_index", "tests/storage_test.py::RestoreNodeTest::test_read_blob", "tests/storage_test.py::RestoreNodeTest::test_read_blob_as_bytes", "tests/storage_test.py::RestoreNodeTest::test_remove_extension", "tests/storage_test.py::RestoreNodeTest::test_verify_hash", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_add_object_from_string", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_download_blobs", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_get_blob", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_get_fqdn_from_any_index_blob", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_get_fqdn_from_backup_index_blob", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_get_object_datetime", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_get_timestamp_from_blob_name", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_hashes_match", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_parse_backup_index", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_parse_backup_index_common_prefix", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_read_blob", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_read_blob_as_bytes", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_remove_extension", "tests/storage_test_with_prefix.py::RestoreNodeTest::test_verify_hash" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-07-14T11:25:50Z"
apache-2.0
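Both defensive filters in the medusa-597 patch operate on a blob listing: Azure accounts with hierarchical namespaces return zero-byte directory markers alongside real objects, and index paths that do not split into the expected number of segments cannot be parsed into backup and node names. A sketch combining the two filters, assuming a simple blob tuple and a prefix-less index layout of depth 4:

```python
from collections import namedtuple

Blob = namedtuple('Blob', ['name', 'size'])

def usable_index_blobs(blobs, expected_depth=4):
    for blob in blobs:
        if blob.size == 0:
            continue  # zero-byte "directory" markers from hierarchical namespaces
        if len(blob.name.split('/')) != expected_depth:
            continue  # e.g. index/backup_index/<backup>/<file> has depth 4
        yield blob

blobs = [
    Blob('index/backup_index/backup3/tokenmap_node1.json', 25),
    Blob('index/backup_index/backup3', 0),                       # folder marker
    Blob('index/backup_index/extra/backup3/tokenmap.json', 25),  # wrong depth
]
kept = list(usable_index_blobs(blobs))
assert [b.name for b in kept] == ['index/backup_index/backup3/tokenmap_node1.json']
```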
thelastpickle__cassandra-medusa-649
diff --git a/medusa/storage/s3_base_storage.py b/medusa/storage/s3_base_storage.py index 5fc5e77..6819dce 100644 --- a/medusa/storage/s3_base_storage.py +++ b/medusa/storage/s3_base_storage.py @@ -99,55 +99,40 @@ class S3BaseStorage(AbstractStorage): def __init__(self, config): - if config.kms_id: + self.kms_id = None + if config.kms_id is not None: logging.debug("Using KMS key {}".format(config.kms_id)) + self.kms_id = config.kms_id self.credentials = self._consolidate_credentials(config) - logging.info('Using credentials {}'.format(self.credentials)) self.bucket_name: str = config.bucket_name - self.config = config - - super().__init__(config) - def connect(self): - - if self.config.storage_provider != 's3_compatible': - # assuming we're dealing with regular aws - s3_url = "https://{}.s3.amazonaws.com".format(self.bucket_name) - else: - # we're dealing with a custom s3 compatible storage, so we need to craft the URL - protocol = 'https' if self.config.secure.lower() == 'true' else 'http' - port = '' if self.config.port is None else str(self.config.port) - s3_url = '{}://{}:{}'.format(protocol, self.config.host, port) + self.storage_provider = config.storage_provider - logging.info('Using S3 URL {}'.format(s3_url)) + self.connection_extra_args = self._make_connection_arguments(config) + self.transfer_config = self._make_transfer_config(config) - logging.debug('Connecting to S3') - extra_args = {} - if self.config.storage_provider == 's3_compatible': - extra_args['endpoint_url'] = s3_url - extra_args['verify'] = False + super().__init__(config) + def connect(self): + logging.info( + 'Connecting to {} with args {}'.format( + self.storage_provider, self.connection_extra_args + ) + ) boto_config = Config( region_name=self.credentials.region, signature_version='v4', tcp_keepalive=True ) - - self.trasnfer_config = TransferConfig( - # we hard-code this one because the parallelism is for now applied to chunking the files - max_concurrency=4, - max_bandwidth=AbstractStorage._human_size_to_bytes(self.config.transfer_max_bandwidth), - ) - self.s3_client = boto3.client( 's3', config=boto_config, aws_access_key_id=self.credentials.access_key_id, aws_secret_access_key=self.credentials.secret_access_key, - **extra_args + **self.connection_extra_args ) def disconnect(self): @@ -157,6 +142,39 @@ class S3BaseStorage(AbstractStorage): except Exception as e: logging.error('Error disconnecting from S3: {}'.format(e)) + def _make_connection_arguments(self, config) -> t.Dict[str, str]: + + secure = config.secure or 'True' + host = config.host + port = config.port + + if self.storage_provider != 's3_compatible': + # when we're dealing with regular AWS, we don't need anything extra + return {} + else: + # we're dealing with a custom s3 compatible storage, so we need to craft the URL + protocol = 'https' if secure.lower() == 'true' else 'http' + port = '' if port is None else str(port) + s3_url = '{}://{}:{}'.format(protocol, host, port) + return { + 'endpoint_url': s3_url, + 'verify': protocol == 'https' + } + + def _make_transfer_config(self, config): + + transfer_max_bandwidth = config.transfer_max_bandwidth or None + + # we hard-code this one because the parallelism is for now applied to chunking the files + transfer_config = { + 'max_concurrency': 4 + } + + if transfer_max_bandwidth is not None: + transfer_config['max_bandwidth'] = AbstractStorage._human_size_to_bytes(transfer_max_bandwidth) + + return TransferConfig(**transfer_config) + @staticmethod def _consolidate_credentials(config) -> 
CensoredCredentials: @@ -206,13 +224,13 @@ class S3BaseStorage(AbstractStorage): async def _upload_object(self, data: io.BytesIO, object_key: str, headers: t.Dict[str, str]) -> AbstractBlob: kms_args = {} - if self.config.kms_id is not None: + if self.kms_id is not None: kms_args['ServerSideEncryption'] = 'aws:kms' - kms_args['SSEKMSKeyId'] = self.config.kms_id + kms_args['SSEKMSKeyId'] = self.kms_id logging.debug( '[S3 Storage] Uploading object from stream -> s3://{}/{}'.format( - self.config.bucket_name, object_key + self.bucket_name, object_key ) ) @@ -220,7 +238,7 @@ class S3BaseStorage(AbstractStorage): # not passing in the transfer config because that is meant to cap a throughput # here we are uploading a small-ish file so no need to cap self.s3_client.put_object( - Bucket=self.config.bucket_name, + Bucket=self.bucket_name, Key=object_key, Body=data, **kms_args, @@ -248,24 +266,24 @@ class S3BaseStorage(AbstractStorage): # print also object size logging.debug( '[S3 Storage] Downloading {} -> {}/{}'.format( - object_key, self.config.bucket_name, object_key + object_key, self.bucket_name, object_key ) ) try: self.s3_client.download_file( - Bucket=self.config.bucket_name, + Bucket=self.bucket_name, Key=object_key, Filename=file_path, - Config=self.trasnfer_config, + Config=self.transfer_config, ) except Exception as e: - logging.error('Error downloading file from s3://{}/{}: {}'.format(self.config.bucket_name, object_key, e)) + logging.error('Error downloading file from s3://{}/{}: {}'.format(self.bucket_name, object_key, e)) raise ObjectDoesNotExistError('Object {} does not exist'.format(object_key)) async def _stat_blob(self, object_key: str) -> AbstractBlob: try: - resp = self.s3_client.head_object(Bucket=self.config.bucket_name, Key=object_key) + resp = self.s3_client.head_object(Bucket=self.bucket_name, Key=object_key) item_hash = resp['ETag'].replace('"', '') return AbstractBlob(object_key, int(resp['ContentLength']), item_hash, resp['LastModified']) except ClientError as e: @@ -275,7 +293,7 @@ class S3BaseStorage(AbstractStorage): else: # Handle other exceptions if needed logging.error("An error occurred:", e) - logging.error('Error getting object from s3://{}/{}'.format(self.config.bucket_name, object_key)) + logging.error('Error getting object from s3://{}/{}'.format(self.bucket_name, object_key)) @retry(stop_max_attempt_number=MAX_UP_DOWN_LOAD_RETRIES, wait_fixed=5000) async def _upload_blob(self, src: str, dest: str) -> ManifestObject: @@ -290,9 +308,9 @@ class S3BaseStorage(AbstractStorage): ) kms_args = {} - if self.config.kms_id is not None: + if self.kms_id is not None: kms_args['ServerSideEncryption'] = 'aws:kms' - kms_args['SSEKMSKeyId'] = self.config.kms_id + kms_args['SSEKMSKeyId'] = self.kms_id file_size = os.stat(src).st_size logging.debug( @@ -305,7 +323,7 @@ class S3BaseStorage(AbstractStorage): Filename=src, Bucket=self.bucket_name, Key=object_key, - Config=self.trasnfer_config, + Config=self.transfer_config, ExtraArgs=kms_args, ) @@ -322,12 +340,12 @@ class S3BaseStorage(AbstractStorage): async def _delete_object(self, obj: AbstractBlob): self.s3_client.delete_object( - Bucket=self.config.bucket_name, + Bucket=self.bucket_name, Key=obj.name ) async def _get_blob_metadata(self, blob_key: str) -> AbstractBlobMetadata: - resp = self.s3_client.head_object(Bucket=self.config.bucket_name, Key=blob_key) + resp = self.s3_client.head_object(Bucket=self.bucket_name, Key=blob_key) # the headers come as some non-default dict, so we need to re-package them blob_metadata = 
resp.get('ResponseMetadata', {}).get('HTTPHeaders', {})
thelastpickle/cassandra-medusa
951b127b7299a2ddeadb0e57567917cb6421db90
diff --git a/tests/storage/s3_storage_test.py b/tests/storage/s3_storage_test.py index 79af5b2..db889e5 100644 --- a/tests/storage/s3_storage_test.py +++ b/tests/storage/s3_storage_test.py @@ -186,6 +186,93 @@ class S3StorageTest(unittest.TestCase): # default AWS region self.assertEqual('us-east-1', credentials.region) + def test_make_s3_url(self): + with patch('botocore.httpsession.URLLib3Session', return_value=_make_instance_metadata_mock()): + with tempfile.NamedTemporaryFile() as empty_file: + config = AttributeDict({ + 'storage_provider': 's3_us_west_oregon', + 'region': 'default', + 'key_file': empty_file.name, + 'api_profile': None, + 'kms_id': None, + 'transfer_max_bandwidth': None, + 'bucket_name': 'whatever-bucket', + 'secure': 'True', + 'host': None, + 'port': None, + }) + s3_storage = S3BaseStorage(config) + # there are no extra connection args when connecting to regular S3 + self.assertEqual( + dict(), + s3_storage.connection_extra_args + ) + + def test_make_s3_url_without_secure(self): + with patch('botocore.httpsession.URLLib3Session', return_value=_make_instance_metadata_mock()): + with tempfile.NamedTemporaryFile() as empty_file: + config = AttributeDict({ + 'storage_provider': 's3_us_west_oregon', + 'region': 'default', + 'key_file': empty_file.name, + 'api_profile': None, + 'kms_id': None, + 'transfer_max_bandwidth': None, + 'bucket_name': 'whatever-bucket', + 'secure': 'False', + 'host': None, + 'port': None, + }) + s3_storage = S3BaseStorage(config) + # again, no extra connection args when connecting to regular S3 + # we can't even disable HTTPS + self.assertEqual( + dict(), + s3_storage.connection_extra_args + ) + + def test_make_s3_compatible_url(self): + with patch('botocore.httpsession.URLLib3Session', return_value=_make_instance_metadata_mock()): + with tempfile.NamedTemporaryFile() as empty_file: + config = AttributeDict({ + 'storage_provider': 's3_compatible', + 'region': 'default', + 'key_file': empty_file.name, + 'api_profile': None, + 'kms_id': None, + 'transfer_max_bandwidth': None, + 'bucket_name': 'whatever-bucket', + 'secure': 'True', + 'host': 's3.example.com', + 'port': '443', + }) + s3_storage = S3BaseStorage(config) + self.assertEqual( + 'https://s3.example.com:443', + s3_storage.connection_extra_args['endpoint_url'] + ) + + def test_make_s3_compatible_url_without_secure(self): + with patch('botocore.httpsession.URLLib3Session', return_value=_make_instance_metadata_mock()): + with tempfile.NamedTemporaryFile() as empty_file: + config = AttributeDict({ + 'storage_provider': 's3_compatible', + 'region': 'default', + 'key_file': empty_file.name, + 'api_profile': None, + 'kms_id': None, + 'transfer_max_bandwidth': None, + 'bucket_name': 'whatever-bucket', + 'secure': 'False', + 'host': 's3.example.com', + 'port': '8080', + }) + s3_storage = S3BaseStorage(config) + self.assertEqual( + 'http://s3.example.com:8080', + s3_storage.connection_extra_args['endpoint_url'] + ) + def _make_instance_metadata_mock(): # mock a call to the metadata service
Refactoring: s3 base storage - move url creation to a method [Project board link](https://github.com/orgs/k8ssandra/projects/8/views/1?pane=issue&itemId=36417073) The constructor of s3_base_storage is too big. One improvement is to move the S3 API URL creation out into a separate method; a sketch follows below. ## Definition of done - [ ] S3 API URL creation is extracted to a separate method.
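A minimal sketch of the refactoring this issue asks for, following the shape of the merged patch above: pull the endpoint-URL construction out of the constructor into its own function. `SimpleConfig` and `make_connection_arguments` are illustrative stand-ins, not the project's actual config class or final API.

```python
import typing as t


class SimpleConfig(t.NamedTuple):
    storage_provider: str
    secure: str                # 'True' / 'False', as read from the ini file
    host: t.Optional[str]
    port: t.Optional[str]


def make_connection_arguments(config: SimpleConfig) -> t.Dict[str, t.Any]:
    """Return extra boto3 client kwargs; empty for plain AWS S3."""
    if config.storage_provider != 's3_compatible':
        # regular AWS needs no endpoint override
        return {}
    # custom s3-compatible storage: craft the endpoint URL from config
    protocol = 'https' if config.secure.lower() == 'true' else 'http'
    port = '' if config.port is None else str(config.port)
    return {
        'endpoint_url': f'{protocol}://{config.host}:{port}',
        'verify': protocol == 'https',
    }


print(make_connection_arguments(
    SimpleConfig('s3_compatible', 'False', 's3.example.com', '8080')))
# -> {'endpoint_url': 'http://s3.example.com:8080', 'verify': False}
```

Isolating the URL logic like this is what makes the unit tests in the test patch possible: they can assert on `connection_extra_args` without opening a real connection.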
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_compatible_url", "tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_compatible_url_without_secure", "tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_url", "tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_url_without_secure" ]
[ "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_env_without_profile", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_everything", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_file", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_metadata", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_with_default_region", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_with_default_region_and_s3_compatible_storage", "tests/storage/s3_storage_test.py::S3StorageTest::test_legacy_provider_region_replacement" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-09-21T10:55:50Z"
apache-2.0
thelastpickle__cassandra-medusa-685
diff --git a/docs/azure_blobs_setup.md b/docs/azure_blobs_setup.md index 135b7c5..447dbc7 100644 --- a/docs/azure_blobs_setup.md +++ b/docs/azure_blobs_setup.md @@ -11,15 +11,7 @@ Create a new storage account or use an existing one which will be used to store "key": "YOUR_KEY" } ``` -If you need to set a different host for Azure (for example the host for Azure Gov is `<storageAccount>.blob.core.usgovcloudapi.net`), please ADDITIONALLY set these two fields in the JSON file (the connection string can be found with the key): - -``` -"host": "YOUR_HOST" -"connection_string": "YOUR_CONNECTION_STRING" - -``` - -Place this file on all Apache Cassandra™ nodes running medusa under `/etc/medusa/`and set the rigths appropriately so that onyl users running Medusa can read/modify it. +Place this file on all Apache Cassandra™ nodes running medusa under `/etc/medusa/`and set the rights appropriately so that only users running Medusa can read/modify it. ### Create a container @@ -36,3 +28,8 @@ key_file = /etc/medusa/medusa-azure-credentials Medusa should now be able to access the bucket and perform all required operations. +If you need to set a different host for Azure (for example the host for Azure Gov is `<storageAccount>.blob.core.usgovcloudapi.net`), please use the `host` parameter in the `[storage]` section of `/etc/medusa/medusa.ini`: + +``` +"host": "usgovcloudapi.net" +``` diff --git a/medusa/storage/azure_storage.py b/medusa/storage/azure_storage.py index 9faee5a..9d25b79 100644 --- a/medusa/storage/azure_storage.py +++ b/medusa/storage/azure_storage.py @@ -49,15 +49,25 @@ class AzureStorage(AbstractStorage): self.account_name = self.credentials.named_key.name self.bucket_name = config.bucket_name + self.azure_blob_service_url = self._make_blob_service_url(self.account_name, config) + # disable chatty loggers logging.getLogger('azure.core.pipeline.policies.http_logging_policy').setLevel(logging.WARNING) logging.getLogger('chardet.universaldetector').setLevel(logging.WARNING) super().__init__(config) + def _make_blob_service_url(self, account_name, config): + domain = 'windows.net' if config.host is None else config.host + if config.port is None: + url = f"https://{account_name}.blob.core.{domain}/" + else: + url = f"https://{account_name}.blob.core.{domain}:{config.port}/" + return url + def connect(self): self.azure_blob_service = BlobServiceClient( - account_url=f"https://{self.account_name}.blob.core.windows.net/", + account_url=self.azure_blob_service_url, credential=self.credentials ) self.azure_container_client = self.azure_blob_service.get_container_client(self.bucket_name)
thelastpickle/cassandra-medusa
dca04bc05ead2998241301be0084680b12e0502b
diff --git a/tests/storage/abstract_storage_test.py b/tests/storage/abstract_storage_test.py index 6051142..5662483 100644 --- a/tests/storage/abstract_storage_test.py +++ b/tests/storage/abstract_storage_test.py @@ -18,7 +18,13 @@ import unittest from medusa.storage.abstract_storage import AbstractStorage -class S3StorageTest(unittest.TestCase): +class AttributeDict(dict): + __slots__ = () + __getattr__ = dict.__getitem__ + __setattr__ = dict.__setitem__ + + +class AbstractStorageTest(unittest.TestCase): def test_convert_human_friendly_size_to_bytes(self): self.assertEqual(50, AbstractStorage._human_size_to_bytes('50B')) diff --git a/tests/storage/azure_storage_test.py b/tests/storage/azure_storage_test.py new file mode 100644 index 0000000..7809eac --- /dev/null +++ b/tests/storage/azure_storage_test.py @@ -0,0 +1,79 @@ +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import tempfile +import unittest + +from medusa.storage.azure_storage import AzureStorage +from tests.storage.abstract_storage_test import AttributeDict + + +class AzureStorageTest(unittest.TestCase): + + credentials_file_content = """ + { + "storage_account": "medusa-unit-test", + "key": "randomString==" + } + """ + + def test_make_connection_url(self): + with tempfile.NamedTemporaryFile() as credentials_file: + credentials_file.write(self.credentials_file_content.encode()) + credentials_file.flush() + config = AttributeDict({ + 'region': 'region-from-config', + 'storage_provider': 'azure_blobs', + 'key_file': credentials_file.name, + 'bucket_name': 'bucket-from-config', + 'concurrent_transfers': '1', + 'host': None, + 'port': None, + }) + azure_storage = AzureStorage(config) + self.assertEqual( + 'https://medusa-unit-test.blob.core.windows.net/', + azure_storage.azure_blob_service_url + ) + + def test_make_connection_url_with_custom_host(self): + with tempfile.NamedTemporaryFile() as credentials_file: + credentials_file.write(self.credentials_file_content.encode()) + credentials_file.flush() + config = AttributeDict({ + 'region': 'region-from-config', + 'storage_provider': 'azure_blobs', + 'key_file': credentials_file.name, + 'bucket_name': 'bucket-from-config', + 'concurrent_transfers': '1', + 'host': 'custom.host.net', + 'port': None, + }) + azure_storage = AzureStorage(config) + self.assertEqual( + 'https://medusa-unit-test.blob.core.custom.host.net/', + azure_storage.azure_blob_service_url + ) + + def test_make_connection_url_with_custom_host_port(self): + with tempfile.NamedTemporaryFile() as credentials_file: + credentials_file.write(self.credentials_file_content.encode()) + credentials_file.flush() + config = AttributeDict({ + 'region': 'region-from-config', + 'storage_provider': 'azure_blobs', + 'key_file': credentials_file.name, + 'bucket_name': 'bucket-from-config', + 'concurrent_transfers': '1', + 'host': 'custom.host.net', + 'port': 123, + }) + azure_storage = AzureStorage(config) + self.assertEqual( + 'https://medusa-unit-test.blob.core.custom.host.net:123/', + azure_storage.azure_blob_service_url + ) diff --git a/tests/storage/google_storage_test.py b/tests/storage/google_storage_test.py index 53bfd6d..891b49f 100644 --- a/tests/storage/google_storage_test.py +++ 
b/tests/storage/google_storage_test.py @@ -22,7 +22,7 @@ from pathlib import Path from medusa.storage.google_storage import _group_by_parent, _is_in_folder -class RestoreNodeTest(unittest.TestCase): +class GoogleStorageTest(unittest.TestCase): def test_is_in_folder(self): folder = Path('foo/bar') diff --git a/tests/storage/s3_storage_test.py b/tests/storage/s3_storage_test.py index 08e98c6..11a5daa 100644 --- a/tests/storage/s3_storage_test.py +++ b/tests/storage/s3_storage_test.py @@ -21,12 +21,7 @@ import tempfile from unittest.mock import patch, MagicMock from medusa.storage.s3_base_storage import S3BaseStorage - - -class AttributeDict(dict): - __slots__ = () - __getattr__ = dict.__getitem__ - __setattr__ = dict.__setitem__ +from tests.storage.abstract_storage_test import AttributeDict class S3StorageTest(unittest.TestCase):
Azure Storage Support for different Cloud Contexts (e.g. AzureUSGovernment) [Project board link](https://github.com/orgs/k8ssandra/projects/8/views/1?pane=issue&itemId=43950753) ## Overview Azure Storage appears to be configured only for the Commercial cloud (_windows.net_) at https://github.com/thelastpickle/cassandra-medusa/blob/master/medusa/storage/azure_storage.py#L58. In the AzureUSGovernment cloud, the URLs end with `usgovcloudapi.net` instead. The Azure SDK allows the appropriate cloud context to be set, and this was possible with the original azure-cli based storage implementation (`< 0.16.0`). ## The Ask The connection string for Azure Blob Storage should support a conditional so that it can *either* be the current `blob.core.windows.net` for "Azure" cloud or `blob.core.usgovcloudapi.net` for "AzureUSGovernment" cloud. This should be configurable in the medusa configuration file rather than hard-coded in the above-referenced file; a sketch of the conditional follows below. _I'd code this up and offer a patch, but I don't immediately have the time, and hoped that the maintainers and existing contributors could tackle it faster than I can._ ## Additional Azure References https://learn.microsoft.com/en-us/azure/azure-government/compare-azure-government-global-azure ![Screen Shot 2023-11-07 at 3 18 07 PM](https://github.com/thelastpickle/cassandra-medusa/assets/3441274/bf9aaed7-88a1-43bb-9882-b621d728dcd2)
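A hedged sketch of the conditional URL construction the issue asks for. The default domain and the URL shape follow the merged patch above; the standalone `make_blob_service_url` helper name is illustrative.

```python
from typing import Optional


def make_blob_service_url(account_name: str,
                          host: Optional[str] = None,
                          port: Optional[int] = None) -> str:
    # 'windows.net' is the Commercial-cloud default; an Azure Government
    # deployment would pass host='usgovcloudapi.net' from the ini file.
    domain = 'windows.net' if host is None else host
    if port is None:
        return f"https://{account_name}.blob.core.{domain}/"
    return f"https://{account_name}.blob.core.{domain}:{port}/"


assert make_blob_service_url('acct') == 'https://acct.blob.core.windows.net/'
assert (make_blob_service_url('acct', 'usgovcloudapi.net')
        == 'https://acct.blob.core.usgovcloudapi.net/')
```

Driving the domain from the existing `host` config field (rather than a new cloud-context enum) keeps the change small and covers any sovereign cloud, which is the approach the patch takes.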
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/storage/azure_storage_test.py::AzureStorageTest::test_make_connection_url", "tests/storage/azure_storage_test.py::AzureStorageTest::test_make_connection_url_with_custom_host", "tests/storage/azure_storage_test.py::AzureStorageTest::test_make_connection_url_with_custom_host_port" ]
[ "tests/storage/abstract_storage_test.py::AbstractStorageTest::test_convert_human_friendly_size_to_bytes", "tests/storage/google_storage_test.py::GoogleStorageTest::test_group_by_parent", "tests/storage/google_storage_test.py::GoogleStorageTest::test_is_in_folder", "tests/storage/google_storage_test.py::GoogleStorageTest::test_iterator_hierarchy", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_env_without_profile", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_everything", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_file", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_metadata", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_with_default_region", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_with_default_region_and_s3_compatible_storage", "tests/storage/s3_storage_test.py::S3StorageTest::test_legacy_provider_region_replacement", "tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_compatible_url", "tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_compatible_url_without_secure", "tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_url", "tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_url_without_secure" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2023-11-17T15:54:37Z"
apache-2.0
thelastpickle__cassandra-medusa-701
diff --git a/medusa-example.ini b/medusa-example.ini index da5ad97..9d7b4d3 100644 --- a/medusa-example.ini +++ b/medusa-example.ini @@ -112,6 +112,10 @@ use_sudo_for_restore = True ; Configures the use of SSL to connect to the object storage system. ;secure = True +; Enables verification of certificates used in case secure is set to True. +; Enabling this is not yet supported - we don't have a good way to configure paths to custom certificates. +; ssl_verify = False + ;aws_cli_path = <Location of the aws cli binary if not in PATH> [monitoring] diff --git a/medusa/config.py b/medusa/config.py index d8ec0e7..60d1477 100644 --- a/medusa/config.py +++ b/medusa/config.py @@ -30,8 +30,8 @@ StorageConfig = collections.namedtuple( 'StorageConfig', ['bucket_name', 'key_file', 'prefix', 'fqdn', 'host_file_separator', 'storage_provider', 'base_path', 'max_backup_age', 'max_backup_count', 'api_profile', 'transfer_max_bandwidth', - 'concurrent_transfers', 'multi_part_upload_threshold', 'host', 'region', 'port', 'secure', 'aws_cli_path', - 'kms_id', 'backup_grace_period_in_days', 'use_sudo_for_restore', 'k8s_mode'] + 'concurrent_transfers', 'multi_part_upload_threshold', 'host', 'region', 'port', 'secure', 'ssl_verify', + 'aws_cli_path', 'kms_id', 'backup_grace_period_in_days', 'use_sudo_for_restore', 'k8s_mode'] ) CassandraConfig = collections.namedtuple( @@ -111,6 +111,7 @@ def _build_default_config(): 'concurrent_transfers': '1', 'multi_part_upload_threshold': str(20 * 1024 * 1024), 'secure': 'True', + 'ssl_verify': 'False', # False until we work out how to specify custom certs 'aws_cli_path': 'aws', 'fqdn': socket.getfqdn(), 'region': 'default', diff --git a/medusa/storage/s3_base_storage.py b/medusa/storage/s3_base_storage.py index 8e5b51e..ad6648e 100644 --- a/medusa/storage/s3_base_storage.py +++ b/medusa/storage/s3_base_storage.py @@ -162,6 +162,7 @@ class S3BaseStorage(AbstractStorage): def _make_connection_arguments(self, config) -> t.Dict[str, str]: secure = config.secure or 'True' + ssl_verify = config.ssl_verify or 'False' # False until we work out how to specify custom certs host = config.host port = config.port @@ -175,7 +176,7 @@ class S3BaseStorage(AbstractStorage): s3_url = '{}://{}:{}'.format(protocol, host, port) return { 'endpoint_url': s3_url, - 'verify': protocol == 'https' + 'verify': ssl_verify.lower() == 'true' } def _make_transfer_config(self, config):
thelastpickle/cassandra-medusa
50e1dad860ce78301da46f6fbac2cd8eb982509d
diff --git a/tests/storage/s3_storage_test.py b/tests/storage/s3_storage_test.py index 0ae0719..acbaa6d 100644 --- a/tests/storage/s3_storage_test.py +++ b/tests/storage/s3_storage_test.py @@ -217,6 +217,7 @@ class S3StorageTest(unittest.TestCase): 'transfer_max_bandwidth': None, 'bucket_name': 'whatever-bucket', 'secure': 'True', + 'ssl_verify': 'False', 'host': None, 'port': None, 'concurrent_transfers': '1' @@ -240,6 +241,7 @@ class S3StorageTest(unittest.TestCase): 'transfer_max_bandwidth': None, 'bucket_name': 'whatever-bucket', 'secure': 'False', + 'ssl_verify': 'False', 'host': None, 'port': None, 'concurrent_transfers': '1' @@ -264,6 +266,7 @@ class S3StorageTest(unittest.TestCase): 'transfer_max_bandwidth': None, 'bucket_name': 'whatever-bucket', 'secure': 'True', + 'ssl_verify': 'False', 'host': 's3.example.com', 'port': '443', 'concurrent_transfers': '1' @@ -286,6 +289,7 @@ class S3StorageTest(unittest.TestCase): 'transfer_max_bandwidth': None, 'bucket_name': 'whatever-bucket', 'secure': 'False', + 'ssl_verify': 'False', 'host': 's3.example.com', 'port': '8080', 'concurrent_transfers': '1' @@ -296,6 +300,46 @@ class S3StorageTest(unittest.TestCase): s3_storage.connection_extra_args['endpoint_url'] ) + def test_make_connection_arguments_without_ssl_verify(self): + with patch('botocore.httpsession.URLLib3Session', return_value=_make_instance_metadata_mock()): + config = AttributeDict({ + 'storage_provider': 's3_compatible', + 'region': 'default', + 'key_file': '/tmp/whatever', + 'api_profile': None, + 'kms_id': None, + 'transfer_max_bandwidth': None, + 'bucket_name': 'whatever-bucket', + 'secure': 'False', + 'ssl_verify': 'False', + 'host': 's3.example.com', + 'port': '8080', + 'concurrent_transfers': '1' + }) + s3_storage = S3BaseStorage(config) + connection_args = s3_storage._make_connection_arguments(config) + self.assertEqual(False, connection_args['verify']) + + def test_make_connection_arguments_with_ssl_verify(self): + with patch('botocore.httpsession.URLLib3Session', return_value=_make_instance_metadata_mock()): + config = AttributeDict({ + 'storage_provider': 's3_compatible', + 'region': 'default', + 'key_file': '/tmp/whatever', + 'api_profile': None, + 'kms_id': None, + 'transfer_max_bandwidth': None, + 'bucket_name': 'whatever-bucket', + 'secure': 'False', + 'ssl_verify': 'True', + 'host': 's3.example.com', + 'port': '8080', + 'concurrent_transfers': '1' + }) + s3_storage = S3BaseStorage(config) + connection_args = s3_storage._make_connection_arguments(config) + self.assertEqual(True, connection_args['verify']) + def test_assume_role_authentication(self): with patch('botocore.httpsession.URLLib3Session', new=_make_assume_role_with_web_identity_mock()): if os.environ.get('AWS_ACCESS_KEY_ID', None): @@ -328,6 +372,7 @@ class S3StorageTest(unittest.TestCase): 'transfer_max_bandwidth': None, 'bucket_name': 'whatever-bucket', 'secure': 'True', + 'ssl_verify': 'False', 'host': None, 'port': None, 'concurrent_transfers': '1'
Allow enabling/disabling SSL certificate verification [Project board link](https://github.com/orgs/k8ssandra/projects/8/views/1?pane=issue&itemId=49576848) We currently force SSL certificate verification as soon as HTTPS is enabled (via the storage.secure config setting). It turned out there's an issue with this: it's not clear how to point medusa/boto/etc. at a custom certificate, as they always use the ones from the certifi python package. A proposed solution is to add a new setting, `ssl_verify`, to control whether we validate the certificates; a sketch follows below.
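A minimal sketch of how a string-typed ini setting can gate certificate verification, following the shape of the merged patch; the `SimpleNamespace` config object here is a stand-in for medusa's real `StorageConfig`.

```python
from types import SimpleNamespace


def make_connection_arguments(config) -> dict:
    secure = config.secure or 'True'
    # defaults to 'False' until pointing at custom certs is supported
    ssl_verify = config.ssl_verify or 'False'
    if config.storage_provider != 's3_compatible':
        return {}
    protocol = 'https' if secure.lower() == 'true' else 'http'
    return {
        'endpoint_url': f'{protocol}://{config.host}:{config.port}',
        # verification is now decoupled from whether HTTPS is in use
        'verify': ssl_verify.lower() == 'true',
    }


cfg = SimpleNamespace(storage_provider='s3_compatible', secure='True',
                      ssl_verify='True', host='s3.example.com', port='443')
print(make_connection_arguments(cfg))
# -> {'endpoint_url': 'https://s3.example.com:443', 'verify': True}
```

The key design point is that `verify` no longer derives from the protocol: an HTTPS endpoint with a self-signed certificate can now be used by leaving `ssl_verify` off.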
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/storage/s3_storage_test.py::S3StorageTest::test_make_connection_arguments_with_ssl_verify" ]
[ "tests/storage/s3_storage_test.py::S3StorageTest::test_assume_role_authentication", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_env_without_profile", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_everything", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_file", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_metadata", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_with_default_region", "tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_with_default_region_and_s3_compatible_storage", "tests/storage/s3_storage_test.py::S3StorageTest::test_legacy_provider_region_replacement", "tests/storage/s3_storage_test.py::S3StorageTest::test_make_connection_arguments_without_ssl_verify", "tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_compatible_url", "tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_compatible_url_without_secure", "tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_url", "tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_url_without_secure" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-01-10T15:58:31Z"
apache-2.0
theolind__pymysensors-122
diff --git a/mysensors/__init__.py b/mysensors/__init__.py index 520165a..ef9dec3 100644 --- a/mysensors/__init__.py +++ b/mysensors/__init__.py @@ -31,6 +31,10 @@ def get_const(protocol_version): version = protocol_version if parse_ver('1.5') <= parse_ver(version) < parse_ver('2.0'): path = 'mysensors.const_15' + elif parse_ver(version) >= parse_ver('2.2'): + path = 'mysensors.const_22' + elif parse_ver(version) >= parse_ver('2.1'): + path = 'mysensors.const_21' elif parse_ver(version) >= parse_ver('2.0'): path = 'mysensors.const_20' else: @@ -127,8 +131,8 @@ class Gateway(object): type=self.const.MessageType.set, payload=value) return None - def _handle_heartbeat(self, msg): - """Process a heartbeat message.""" + def _handle_smartsleep(self, msg): + """Process a message before going back to smartsleep.""" if not self.is_sensor(msg.node_id): return while self.sensors[msg.node_id].queue: @@ -411,8 +415,8 @@ class Gateway(object): thread has sent all previously queued commands to the FIFO queue. If the sensor attribute new_state returns True, the command will not be put on the queue, but the internal sensor state will be updated. When a - heartbeat response is received, the internal state will be pushed to - the sensor, via _handle_heartbeat method. + smartsleep message is received, the internal state will be pushed to + the sensor, via _handle_smartsleep method. """ if not self.is_sensor(sensor_id, child_id): return diff --git a/mysensors/const_20.py b/mysensors/const_20.py index 98ff4fd..a27b6cd 100644 --- a/mysensors/const_20.py +++ b/mysensors/const_20.py @@ -1,4 +1,4 @@ -"""MySensors constants for version 1.5 of MySensors.""" +"""MySensors constants for version 2.0 of MySensors.""" from enum import IntEnum import voluptuous as vol @@ -246,11 +246,6 @@ class Internal(IntEnum): I_REGISTRATION_REQUEST = 26 # Register request to GW I_REGISTRATION_RESPONSE = 27 # Register response from GW I_DEBUG = 28 # Debug message - I_SIGNAL_REPORT_REQUEST = 29 # Device signal strength request - I_SIGNAL_REPORT_REVERSE = 30 # Internal - I_SIGNAL_REPORT_RESPONSE = 31 # Device signal strength response (RSSI) - I_PRE_SLEEP_NOTIFICATION = 32 # Message sent before node is going to sleep - I_POST_SLEEP_NOTIFICATION = 33 # Message sent after node woke up class Stream(IntEnum): @@ -344,15 +339,6 @@ VALID_INTERNAL.update({ Internal.I_REGISTRATION_REQUEST: str, Internal.I_REGISTRATION_RESPONSE: str, Internal.I_DEBUG: str, - Internal.I_SIGNAL_REPORT_REQUEST: str, - Internal.I_SIGNAL_REPORT_REVERSE: vol.All( - vol.Coerce(int), vol.Coerce(str)), - Internal.I_SIGNAL_REPORT_RESPONSE: vol.All( - vol.Coerce(int), vol.Coerce(str)), - Internal.I_PRE_SLEEP_NOTIFICATION: vol.All( - vol.Coerce(int), vol.Coerce(str)), - Internal.I_POST_SLEEP_NOTIFICATION: vol.All( - vol.Coerce(int), vol.Coerce(str)), }) VALID_PAYLOADS = { @@ -370,7 +356,7 @@ HANDLE_INTERNAL.update({ 'node_id': 255, 'ack': 0, 'sub_type': Internal.I_DISCOVER, 'payload': ''}}, Internal.I_HEARTBEAT_RESPONSE: { - 'fun': '_handle_heartbeat'}, + 'fun': '_handle_smartsleep'}, Internal.I_DISCOVER_RESPONSE: { 'is_sensor': True}, }) diff --git a/mysensors/const_21.py b/mysensors/const_21.py new file mode 100644 index 0000000..7e08924 --- /dev/null +++ b/mysensors/const_21.py @@ -0,0 +1,107 @@ +"""MySensors constants for version 2.1 of MySensors.""" +from enum import IntEnum + +# pylint: disable=unused-import +from mysensors.const_20 import (HANDLE_INTERNAL, MAX_NODE_ID, # noqa: F401 + VALID_INTERNAL, VALID_PRESENTATION, + VALID_SETREQ, VALID_STREAM, VALID_TYPES, + 
MessageType, Presentation, SetReq, Stream) + + +class Internal(IntEnum): + """MySensors internal sub-types.""" + + # pylint: disable=too-few-public-methods + # Use this to report the battery level (in percent 0-100). + I_BATTERY_LEVEL = 0 + # Sensors can request the current time from the Controller using this + # message. The time will be reported as the seconds since 1970 + I_TIME = 1 + # Sensors report their library version at startup using this message type + I_VERSION = 2 + # Use this to request a unique node id from the controller. + I_ID_REQUEST = 3 + # Id response back to sensor. Payload contains sensor id. + I_ID_RESPONSE = 4 + # Start/stop inclusion mode of the Controller (1=start, 0=stop). + I_INCLUSION_MODE = 5 + # Config request from node. Reply with (M)etric or (I)mperal back to sensor + I_CONFIG = 6 + # When a sensor starts up, it broadcast a search request to all neighbor + # nodes. They reply with a I_FIND_PARENT_RESPONSE. + I_FIND_PARENT_REQUEST = 7 + I_FIND_PARENT = 7 # alias from version 2.0 + # Reply message type to I_FIND_PARENT request. + I_FIND_PARENT_RESPONSE = 8 + # Sent by the gateway to the Controller to trace-log a message + I_LOG_MESSAGE = 9 + # A message that can be used to transfer child sensors + # (from EEPROM routing table) of a repeating node. + I_CHILDREN = 10 + # Optional sketch name that can be used to identify sensor in the + # Controller GUI + I_SKETCH_NAME = 11 + # Optional sketch version that can be reported to keep track of the version + # of sensor in the Controller GUI. + I_SKETCH_VERSION = 12 + # Used by OTA firmware updates. Request for node to reboot. + I_REBOOT = 13 + # Send by gateway to controller when startup is complete + I_GATEWAY_READY = 14 + # Provides signing related preferences (first byte is preference version). + I_SIGNING_PRESENTATION = 15 + I_REQUEST_SIGNING = 15 # alias from version 1.5 + # Request for a nonce. + I_NONCE_REQUEST = 16 + I_GET_NONCE = 16 # alias from version 1.5 + # Payload is nonce data. + I_NONCE_RESPONSE = 17 + I_GET_NONCE_RESPONSE = 17 # alias from version 1.5 + I_HEARTBEAT_REQUEST = 18 + I_HEARTBEAT = 18 # alias from version 2.0 + I_PRESENTATION = 19 + I_DISCOVER_REQUEST = 20 + I_DISCOVER = 20 # alias from version 2.0 + I_DISCOVER_RESPONSE = 21 + I_HEARTBEAT_RESPONSE = 22 + # Node is locked (reason in string-payload). 
+ I_LOCKED = 23 + I_PING = 24 # Ping sent to node, payload incremental hop counter + # In return to ping, sent back to sender, payload incremental hop counter + I_PONG = 25 + I_REGISTRATION_REQUEST = 26 # Register request to GW + I_REGISTRATION_RESPONSE = 27 # Register response from GW + I_DEBUG = 28 # Debug message + + +VALID_MESSAGE_TYPES = { + MessageType.presentation: list(Presentation), + MessageType.set: list(SetReq), + MessageType.req: list(SetReq), + MessageType.internal: list(Internal), + MessageType.stream: list(Stream), +} + + +VALID_INTERNAL = dict(VALID_INTERNAL) +VALID_INTERNAL.update({ + Internal.I_FIND_PARENT_REQUEST: '', + Internal.I_HEARTBEAT_REQUEST: '', + Internal.I_DISCOVER_REQUEST: '', +}) + +VALID_PAYLOADS = { + MessageType.presentation: VALID_PRESENTATION, + MessageType.set: VALID_SETREQ, + MessageType.req: {member: '' for member in list(SetReq)}, + MessageType.internal: VALID_INTERNAL, + MessageType.stream: VALID_STREAM, +} + +HANDLE_INTERNAL = dict(HANDLE_INTERNAL) +HANDLE_INTERNAL.update({ + Internal.I_GATEWAY_READY: { + 'log': 'info', 'msg': { + 'node_id': 255, 'ack': 0, 'sub_type': Internal.I_DISCOVER_REQUEST, + 'payload': ''}}, +}) diff --git a/mysensors/const_22.py b/mysensors/const_22.py new file mode 100644 index 0000000..6289960 --- /dev/null +++ b/mysensors/const_22.py @@ -0,0 +1,119 @@ +"""MySensors constants for version 2.2 of MySensors.""" +from enum import IntEnum + +import voluptuous as vol + +# pylint: disable=unused-import +from mysensors.const_21 import (HANDLE_INTERNAL, MAX_NODE_ID, # noqa: F401 + VALID_INTERNAL, VALID_PRESENTATION, + VALID_SETREQ, VALID_STREAM, VALID_TYPES, + MessageType, Presentation, SetReq, Stream) + + +class Internal(IntEnum): + """MySensors internal sub-types.""" + + # pylint: disable=too-few-public-methods + # Use this to report the battery level (in percent 0-100). + I_BATTERY_LEVEL = 0 + # Sensors can request the current time from the Controller using this + # message. The time will be reported as the seconds since 1970 + I_TIME = 1 + # Sensors report their library version at startup using this message type + I_VERSION = 2 + # Use this to request a unique node id from the controller. + I_ID_REQUEST = 3 + # Id response back to sensor. Payload contains sensor id. + I_ID_RESPONSE = 4 + # Start/stop inclusion mode of the Controller (1=start, 0=stop). + I_INCLUSION_MODE = 5 + # Config request from node. Reply with (M)etric or (I)mperal back to sensor + I_CONFIG = 6 + # When a sensor starts up, it broadcast a search request to all neighbor + # nodes. They reply with a I_FIND_PARENT_RESPONSE. + I_FIND_PARENT_REQUEST = 7 + I_FIND_PARENT = 7 # alias from version 2.0 + # Reply message type to I_FIND_PARENT request. + I_FIND_PARENT_RESPONSE = 8 + # Sent by the gateway to the Controller to trace-log a message + I_LOG_MESSAGE = 9 + # A message that can be used to transfer child sensors + # (from EEPROM routing table) of a repeating node. + I_CHILDREN = 10 + # Optional sketch name that can be used to identify sensor in the + # Controller GUI + I_SKETCH_NAME = 11 + # Optional sketch version that can be reported to keep track of the version + # of sensor in the Controller GUI. + I_SKETCH_VERSION = 12 + # Used by OTA firmware updates. Request for node to reboot. + I_REBOOT = 13 + # Send by gateway to controller when startup is complete + I_GATEWAY_READY = 14 + # Provides signing related preferences (first byte is preference version). + I_SIGNING_PRESENTATION = 15 + I_REQUEST_SIGNING = 15 # alias from version 1.5 + # Request for a nonce. 
+ I_NONCE_REQUEST = 16 + I_GET_NONCE = 16 # alias from version 1.5 + # Payload is nonce data. + I_NONCE_RESPONSE = 17 + I_GET_NONCE_RESPONSE = 17 # alias from version 1.5 + I_HEARTBEAT_REQUEST = 18 + I_HEARTBEAT = 18 # alias from version 2.0 + I_PRESENTATION = 19 + I_DISCOVER_REQUEST = 20 + I_DISCOVER = 20 # alias from version 2.0 + I_DISCOVER_RESPONSE = 21 + I_HEARTBEAT_RESPONSE = 22 + # Node is locked (reason in string-payload). + I_LOCKED = 23 + I_PING = 24 # Ping sent to node, payload incremental hop counter + # In return to ping, sent back to sender, payload incremental hop counter + I_PONG = 25 + I_REGISTRATION_REQUEST = 26 # Register request to GW + I_REGISTRATION_RESPONSE = 27 # Register response from GW + I_DEBUG = 28 # Debug message + I_SIGNAL_REPORT_REQUEST = 29 # Device signal strength request + I_SIGNAL_REPORT_REVERSE = 30 # Internal + I_SIGNAL_REPORT_RESPONSE = 31 # Device signal strength response (RSSI) + I_PRE_SLEEP_NOTIFICATION = 32 # Message sent before node is going to sleep + I_POST_SLEEP_NOTIFICATION = 33 # Message sent after node woke up + + +VALID_MESSAGE_TYPES = { + MessageType.presentation: list(Presentation), + MessageType.set: list(SetReq), + MessageType.req: list(SetReq), + MessageType.internal: list(Internal), + MessageType.stream: list(Stream), +} + + +VALID_INTERNAL = dict(VALID_INTERNAL) +VALID_INTERNAL.update({ + Internal.I_SIGNAL_REPORT_REQUEST: str, + Internal.I_SIGNAL_REPORT_REVERSE: vol.All( + vol.Coerce(int), vol.Coerce(str)), + Internal.I_SIGNAL_REPORT_RESPONSE: vol.All( + vol.Coerce(int), vol.Coerce(str)), + Internal.I_PRE_SLEEP_NOTIFICATION: vol.All( + vol.Coerce(int), vol.Coerce(str)), + Internal.I_POST_SLEEP_NOTIFICATION: vol.All( + vol.Coerce(int), vol.Coerce(str)), +}) + +VALID_PAYLOADS = { + MessageType.presentation: VALID_PRESENTATION, + MessageType.set: VALID_SETREQ, + MessageType.req: {member: '' for member in list(SetReq)}, + MessageType.internal: VALID_INTERNAL, + MessageType.stream: VALID_STREAM, +} + +HANDLE_INTERNAL = dict(HANDLE_INTERNAL) +HANDLE_INTERNAL.pop(Internal.I_HEARTBEAT_RESPONSE, None) +HANDLE_INTERNAL.update({ + Internal.I_PRE_SLEEP_NOTIFICATION: { + 'fun': '_handle_smartsleep'}, +})
theolind/pymysensors
b6deffc604865bba583bf82c089fdfc1d38da4e8
diff --git a/tests/test_message.py b/tests/test_message.py index bdd7dda..aac8a1b 100644 --- a/tests/test_message.py +++ b/tests/test_message.py @@ -162,6 +162,19 @@ INTERNAL_FIXTURES_20.update({ 'I_REGISTRATION_REQUEST': '2.0.0', 'I_REGISTRATION_RESPONSE': '1', 'I_DEBUG': 'test debug', +}) + + +INTERNAL_FIXTURES_21 = dict(INTERNAL_FIXTURES_20) +INTERNAL_FIXTURES_21.update({ + 'I_FIND_PARENT_REQUEST': '', + 'I_HEARTBEAT_REQUEST': '', + 'I_DISCOVER_REQUEST': '', +}) + + +INTERNAL_FIXTURES_22 = dict(INTERNAL_FIXTURES_21) +INTERNAL_FIXTURES_22.update({ 'I_SIGNAL_REPORT_REQUEST': 'test', 'I_SIGNAL_REPORT_REVERSE': '123', 'I_SIGNAL_REPORT_RESPONSE': '123', @@ -274,7 +287,8 @@ def test_validate_internal(): """Test Internal messages.""" versions = [ ('1.4', INTERNAL_FIXTURES_14), ('1.5', INTERNAL_FIXTURES_15), - ('2.0', INTERNAL_FIXTURES_20)] + ('2.0', INTERNAL_FIXTURES_20), ('2.1', INTERNAL_FIXTURES_21), + ('2.2', INTERNAL_FIXTURES_22)] for protocol_version, fixture in versions: gateway = get_gateway(protocol_version) const = get_const(protocol_version) @@ -287,7 +301,15 @@ def test_validate_internal(): return_value = None sub_type = const.Internal[name] msg = Message('1;255;3;0;{};{}\n'.format(sub_type, _payload)) - valid = msg.validate(protocol_version) + try: + valid = msg.validate(protocol_version) + except vol.MultipleInvalid: + print('fixture version: ', protocol_version) + print('gateway version: ', gateway.protocol_version) + print('name: ', name) + print('subtype: ', sub_type) + print('payload: ', _payload) + raise assert valid == { 'node_id': 1, 'child_id': 255, 'type': 3, 'ack': 0, 'sub_type': sub_type, 'payload': _payload} diff --git a/tests/test_mysensors.py b/tests/test_mysensors.py index 56879bd..e9eb14b 100644 --- a/tests/test_mysensors.py +++ b/tests/test_mysensors.py @@ -668,8 +668,8 @@ class TestGateway20(TestGateway): ret = self.gateway.handle_queue() self.assertEqual(ret, '1;255;3;0;19;\n') - def test_heartbeat(self): - """Test heartbeat message.""" + def test_smartsleep(self): + """Test smartsleep feature.""" sensor = self._add_sensor(1) sensor.children[0] = ChildSensor( 0, self.gateway.const.Presentation.S_LIGHT_LEVEL) @@ -708,8 +708,8 @@ class TestGateway20(TestGateway): # nothing has changed self.assertEqual(ret, None) - def test_heartbeat_from_unknown(self): - """Test heartbeat message from unknown node.""" + def test_smartsleep_from_unknown(self): + """Test smartsleep message from unknown node.""" self.gateway.logic('1;255;3;0;22;\n') ret = self.gateway.handle_queue() self.assertEqual(ret, '1;255;3;0;19;\n') @@ -774,6 +774,81 @@ class TestGateway20(TestGateway): '10.0,10.0,10.0') +class TestGateway21(TestGateway20): + """Use protocol_version 2.1.""" + + def setUp(self): + """Set up gateway.""" + self.gateway = Gateway(protocol_version='2.1') + + +class TestGateway22(TestGateway21): + """Use protocol_version 2.2.""" + + def setUp(self): + """Set up gateway.""" + self.gateway = Gateway(protocol_version='2.2') + + def test_smartsleep(self): + """Test smartsleep feature.""" + sensor = self._add_sensor(1) + sensor.children[0] = ChildSensor( + 0, self.gateway.const.Presentation.S_LIGHT_LEVEL) + self.gateway.logic('1;0;1;0;23;43\n') + ret = self.gateway.handle_queue() + self.assertEqual(ret, None) + # pre sleep message + self.gateway.logic('1;255;3;0;32;500\n') + ret = self.gateway.handle_queue() + # nothing has changed + self.assertEqual(ret, None) + # change from controller side + self.gateway.set_child_value( + 1, 0, self.gateway.const.SetReq.V_LIGHT_LEVEL, '57') + ret = 
self.gateway.handle_queue() + # no pre sleep message + self.assertEqual(ret, None) + # pre sleep message comes in + self.gateway.logic('1;255;3;0;32;500\n') + ret = self.gateway.handle_queue() + # instance responds with new values + self.assertEqual(ret, '1;0;1;0;23;57\n') + # request from node + self.gateway.logic('1;0;2;0;23;\n') + ret = self.gateway.handle_queue() + # no pre sleep message + self.assertEqual(ret, None) + # pre sleep message + self.gateway.logic('1;255;3;0;32;500\n') + ret = self.gateway.handle_queue() + # instance responds to request with current value + self.assertEqual(ret, '1;0;1;0;23;57\n') + # pre sleep message + self.gateway.logic('1;255;3;0;32;500\n') + ret = self.gateway.handle_queue() + # nothing has changed + self.assertEqual(ret, None) + + def test_smartsleep_from_unknown(self): + """Test smartsleep message from unknown node.""" + self.gateway.logic('1;255;3;0;32;500\n') + ret = self.gateway.handle_queue() + self.assertEqual(ret, '1;255;3;0;19;\n') + + def test_set_with_new_state(self): + """Test set message with populated new_state.""" + sensor = self._add_sensor(1) + sensor.children[0] = ChildSensor( + 0, self.gateway.const.Presentation.S_LIGHT_LEVEL) + self.gateway.logic('1;0;1;0;23;43\n') + self.gateway.logic('1;255;3;0;32;500\n') + self.gateway.logic('1;0;1;0;23;57\n') + self.assertEqual( + sensor.children[0].values[self.gateway.const.SetReq.V_LIGHT_LEVEL], + sensor.new_state[0].values[ + self.gateway.const.SetReq.V_LIGHT_LEVEL]) + + def test_gateway_bad_protocol(): """Test initializing gateway with a bad protocol_version.""" gateway = Gateway(protocol_version=None)
Add support for I_PRE_SLEEP_NOTIFICATION in 2.2.0 Version 2.2.0 changed the behavior of smartsleep. Instead of sending a heartbeat before going back to sleep, the node now sends an `I_PRE_SLEEP_NOTIFICATION` internal message. The node also sends an `I_POST_SLEEP_NOTIFICATION` after waking up from sleep. See: https://github.com/mysensors/MySensors/pull/722 **Breaking change** The change outlined above broke the smartsleep feature in pymysensors for users running version 2.2.0 of mysensors. **Suggested fix** Use different internal message types for the smartsleep feature depending on the mysensors version (see the sketch below). Make a new const module and update these lines: https://github.com/theolind/pymysensors/blob/dev/mysensors/const_20.py#L372-L373
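A sketch of the version-keyed dispatch the suggested fix describes: pick which internal message triggers the smartsleep flush based on the protocol version. The sub-type numbers come from the MySensors serial API (22 = I_HEARTBEAT_RESPONSE, 32 = I_PRE_SLEEP_NOTIFICATION); the helper names and the naive version parsing are illustrative only, not pymysensors' real code.

```python
I_HEARTBEAT_RESPONSE = 22       # smartsleep trigger for 2.0 / 2.1
I_PRE_SLEEP_NOTIFICATION = 32   # smartsleep trigger from 2.2 onwards


def _ver(version: str) -> tuple:
    """Naive stand-in for a real version parser (e.g. pkg_resources)."""
    return tuple(int(part) for part in version.split('.')[:2])


def smartsleep_trigger(protocol_version: str) -> int:
    """Return the internal sub-type that should flush queued commands."""
    if _ver(protocol_version) >= _ver('2.2'):
        return I_PRE_SLEEP_NOTIFICATION
    return I_HEARTBEAT_RESPONSE


assert smartsleep_trigger('2.0') == I_HEARTBEAT_RESPONSE
assert smartsleep_trigger('2.1') == I_HEARTBEAT_RESPONSE
assert smartsleep_trigger('2.2') == I_PRE_SLEEP_NOTIFICATION
```

The merged patch realizes this idea structurally: a per-version const module (`const_22`) pops the `I_HEARTBEAT_RESPONSE` handler and registers the same `_handle_smartsleep` function under `I_PRE_SLEEP_NOTIFICATION` instead.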
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_message.py::test_validate_internal", "tests/test_mysensors.py::TestGateway22::test_set_with_new_state", "tests/test_mysensors.py::TestGateway22::test_smartsleep", "tests/test_mysensors.py::TestGateway22::test_smartsleep_from_unknown" ]
[ "tests/test_message.py::TestMessage::test_decode", "tests/test_message.py::TestMessage::test_decode_bad_message", "tests/test_message.py::TestMessage::test_encode", "tests/test_message.py::TestMessage::test_encode_bad_message", "tests/test_message.py::test_validate_pres", "tests/test_message.py::test_validate_bad_pres", "tests/test_message.py::test_validate_set", "tests/test_mysensors.py::TestGateway::test_bad_battery_level", "tests/test_mysensors.py::TestGateway::test_bad_file_name", "tests/test_mysensors.py::TestGateway::test_battery_level", "tests/test_mysensors.py::TestGateway::test_callback", "tests/test_mysensors.py::TestGateway::test_callback_exception", "tests/test_mysensors.py::TestGateway::test_child_validate", "tests/test_mysensors.py::TestGateway::test_id_request_with_node_zero", "tests/test_mysensors.py::TestGateway::test_internal_config", "tests/test_mysensors.py::TestGateway::test_internal_gateway_ready", "tests/test_mysensors.py::TestGateway::test_internal_id_request", "tests/test_mysensors.py::TestGateway::test_internal_log_message", "tests/test_mysensors.py::TestGateway::test_internal_sketch_name", "tests/test_mysensors.py::TestGateway::test_internal_sketch_version", "tests/test_mysensors.py::TestGateway::test_internal_time", "tests/test_mysensors.py::TestGateway::test_json_empty_file_good_bak", "tests/test_mysensors.py::TestGateway::test_json_empty_files", "tests/test_mysensors.py::TestGateway::test_json_no_files", "tests/test_mysensors.py::TestGateway::test_json_persistence", "tests/test_mysensors.py::TestGateway::test_json_upgrade", "tests/test_mysensors.py::TestGateway::test_logic_bad_message", "tests/test_mysensors.py::TestGateway::test_non_presented_sensor", "tests/test_mysensors.py::TestGateway::test_persistence_at_init", "tests/test_mysensors.py::TestGateway::test_pickle_empty_files", "tests/test_mysensors.py::TestGateway::test_pickle_persistence", "tests/test_mysensors.py::TestGateway::test_pickle_upgrade", "tests/test_mysensors.py::TestGateway::test_present_humidity_sensor", "tests/test_mysensors.py::TestGateway::test_present_light_level_sensor", "tests/test_mysensors.py::TestGateway::test_present_same_child", "tests/test_mysensors.py::TestGateway::test_present_to_non_sensor", "tests/test_mysensors.py::TestGateway::test_presentation_arduino_node", "tests/test_mysensors.py::TestGateway::test_req", "tests/test_mysensors.py::TestGateway::test_req_notasensor", "tests/test_mysensors.py::TestGateway::test_req_novalue", "tests/test_mysensors.py::TestGateway::test_req_zerovalue", "tests/test_mysensors.py::TestGateway::test_set_and_reboot", "tests/test_mysensors.py::TestGateway::test_set_bad_battery_attribute", "tests/test_mysensors.py::TestGateway::test_set_child_no_children", "tests/test_mysensors.py::TestGateway::test_set_child_value", "tests/test_mysensors.py::TestGateway::test_set_child_value_bad_ack", "tests/test_mysensors.py::TestGateway::test_set_child_value_bad_type", "tests/test_mysensors.py::TestGateway::test_set_child_value_no_sensor", "tests/test_mysensors.py::TestGateway::test_set_child_value_value_type", "tests/test_mysensors.py::TestGateway::test_set_forecast", "tests/test_mysensors.py::TestGateway::test_set_humidity_level", "tests/test_mysensors.py::TestGateway::test_set_light_level", "tests/test_mysensors.py::TestGateway15::test_bad_battery_level", "tests/test_mysensors.py::TestGateway15::test_bad_file_name", "tests/test_mysensors.py::TestGateway15::test_battery_level", "tests/test_mysensors.py::TestGateway15::test_callback", 
"tests/test_mysensors.py::TestGateway15::test_callback_exception", "tests/test_mysensors.py::TestGateway15::test_child_validate", "tests/test_mysensors.py::TestGateway15::test_id_request_with_node_zero", "tests/test_mysensors.py::TestGateway15::test_internal_config", "tests/test_mysensors.py::TestGateway15::test_internal_gateway_ready", "tests/test_mysensors.py::TestGateway15::test_internal_id_request", "tests/test_mysensors.py::TestGateway15::test_internal_log_message", "tests/test_mysensors.py::TestGateway15::test_internal_sketch_name", "tests/test_mysensors.py::TestGateway15::test_internal_sketch_version", "tests/test_mysensors.py::TestGateway15::test_internal_time", "tests/test_mysensors.py::TestGateway15::test_json_empty_file_good_bak", "tests/test_mysensors.py::TestGateway15::test_json_empty_files", "tests/test_mysensors.py::TestGateway15::test_json_no_files", "tests/test_mysensors.py::TestGateway15::test_json_persistence", "tests/test_mysensors.py::TestGateway15::test_json_upgrade", "tests/test_mysensors.py::TestGateway15::test_logic_bad_message", "tests/test_mysensors.py::TestGateway15::test_non_presented_sensor", "tests/test_mysensors.py::TestGateway15::test_persistence_at_init", "tests/test_mysensors.py::TestGateway15::test_pickle_empty_files", "tests/test_mysensors.py::TestGateway15::test_pickle_persistence", "tests/test_mysensors.py::TestGateway15::test_pickle_upgrade", "tests/test_mysensors.py::TestGateway15::test_present_humidity_sensor", "tests/test_mysensors.py::TestGateway15::test_present_light_level_sensor", "tests/test_mysensors.py::TestGateway15::test_present_same_child", "tests/test_mysensors.py::TestGateway15::test_present_to_non_sensor", "tests/test_mysensors.py::TestGateway15::test_presentation_arduino_node", "tests/test_mysensors.py::TestGateway15::test_req", "tests/test_mysensors.py::TestGateway15::test_req_notasensor", "tests/test_mysensors.py::TestGateway15::test_req_novalue", "tests/test_mysensors.py::TestGateway15::test_req_zerovalue", "tests/test_mysensors.py::TestGateway15::test_set_and_reboot", "tests/test_mysensors.py::TestGateway15::test_set_bad_battery_attribute", "tests/test_mysensors.py::TestGateway15::test_set_child_no_children", "tests/test_mysensors.py::TestGateway15::test_set_child_value", "tests/test_mysensors.py::TestGateway15::test_set_child_value_bad_ack", "tests/test_mysensors.py::TestGateway15::test_set_child_value_bad_type", "tests/test_mysensors.py::TestGateway15::test_set_child_value_no_sensor", "tests/test_mysensors.py::TestGateway15::test_set_child_value_value_type", "tests/test_mysensors.py::TestGateway15::test_set_forecast", "tests/test_mysensors.py::TestGateway15::test_set_humidity_level", "tests/test_mysensors.py::TestGateway15::test_set_light_level", "tests/test_mysensors.py::TestGateway15::test_set_rgb", "tests/test_mysensors.py::TestGateway15::test_set_rgbw", "tests/test_mysensors.py::TestGateway20::test_bad_battery_level", "tests/test_mysensors.py::TestGateway20::test_bad_file_name", "tests/test_mysensors.py::TestGateway20::test_battery_level", "tests/test_mysensors.py::TestGateway20::test_callback", "tests/test_mysensors.py::TestGateway20::test_callback_exception", "tests/test_mysensors.py::TestGateway20::test_child_validate", "tests/test_mysensors.py::TestGateway20::test_discover_response_known", "tests/test_mysensors.py::TestGateway20::test_discover_response_unknown", "tests/test_mysensors.py::TestGateway20::test_id_request_with_node_zero", "tests/test_mysensors.py::TestGateway20::test_internal_config", 
"tests/test_mysensors.py::TestGateway20::test_internal_gateway_ready", "tests/test_mysensors.py::TestGateway20::test_internal_id_request", "tests/test_mysensors.py::TestGateway20::test_internal_log_message", "tests/test_mysensors.py::TestGateway20::test_internal_sketch_name", "tests/test_mysensors.py::TestGateway20::test_internal_sketch_version", "tests/test_mysensors.py::TestGateway20::test_internal_time", "tests/test_mysensors.py::TestGateway20::test_json_empty_file_good_bak", "tests/test_mysensors.py::TestGateway20::test_json_empty_files", "tests/test_mysensors.py::TestGateway20::test_json_no_files", "tests/test_mysensors.py::TestGateway20::test_json_persistence", "tests/test_mysensors.py::TestGateway20::test_json_upgrade", "tests/test_mysensors.py::TestGateway20::test_logic_bad_message", "tests/test_mysensors.py::TestGateway20::test_non_presented_child", "tests/test_mysensors.py::TestGateway20::test_non_presented_sensor", "tests/test_mysensors.py::TestGateway20::test_persistence_at_init", "tests/test_mysensors.py::TestGateway20::test_pickle_empty_files", "tests/test_mysensors.py::TestGateway20::test_pickle_persistence", "tests/test_mysensors.py::TestGateway20::test_pickle_upgrade", "tests/test_mysensors.py::TestGateway20::test_present_humidity_sensor", "tests/test_mysensors.py::TestGateway20::test_present_light_level_sensor", "tests/test_mysensors.py::TestGateway20::test_present_same_child", "tests/test_mysensors.py::TestGateway20::test_present_to_non_sensor", "tests/test_mysensors.py::TestGateway20::test_presentation_arduino_node", "tests/test_mysensors.py::TestGateway20::test_req", "tests/test_mysensors.py::TestGateway20::test_req_notasensor", "tests/test_mysensors.py::TestGateway20::test_req_novalue", "tests/test_mysensors.py::TestGateway20::test_req_zerovalue", "tests/test_mysensors.py::TestGateway20::test_set_and_reboot", "tests/test_mysensors.py::TestGateway20::test_set_bad_battery_attribute", "tests/test_mysensors.py::TestGateway20::test_set_child_no_children", "tests/test_mysensors.py::TestGateway20::test_set_child_value", "tests/test_mysensors.py::TestGateway20::test_set_child_value_bad_ack", "tests/test_mysensors.py::TestGateway20::test_set_child_value_bad_type", "tests/test_mysensors.py::TestGateway20::test_set_child_value_no_sensor", "tests/test_mysensors.py::TestGateway20::test_set_child_value_value_type", "tests/test_mysensors.py::TestGateway20::test_set_forecast", "tests/test_mysensors.py::TestGateway20::test_set_humidity_level", "tests/test_mysensors.py::TestGateway20::test_set_light_level", "tests/test_mysensors.py::TestGateway20::test_set_position", "tests/test_mysensors.py::TestGateway20::test_set_with_new_state", "tests/test_mysensors.py::TestGateway20::test_smartsleep", "tests/test_mysensors.py::TestGateway20::test_smartsleep_from_unknown", "tests/test_mysensors.py::TestGateway21::test_bad_battery_level", "tests/test_mysensors.py::TestGateway21::test_bad_file_name", "tests/test_mysensors.py::TestGateway21::test_battery_level", "tests/test_mysensors.py::TestGateway21::test_callback", "tests/test_mysensors.py::TestGateway21::test_callback_exception", "tests/test_mysensors.py::TestGateway21::test_child_validate", "tests/test_mysensors.py::TestGateway21::test_discover_response_known", "tests/test_mysensors.py::TestGateway21::test_discover_response_unknown", "tests/test_mysensors.py::TestGateway21::test_id_request_with_node_zero", "tests/test_mysensors.py::TestGateway21::test_internal_config", "tests/test_mysensors.py::TestGateway21::test_internal_gateway_ready", 
"tests/test_mysensors.py::TestGateway21::test_internal_id_request", "tests/test_mysensors.py::TestGateway21::test_internal_log_message", "tests/test_mysensors.py::TestGateway21::test_internal_sketch_name", "tests/test_mysensors.py::TestGateway21::test_internal_sketch_version", "tests/test_mysensors.py::TestGateway21::test_internal_time", "tests/test_mysensors.py::TestGateway21::test_json_empty_file_good_bak", "tests/test_mysensors.py::TestGateway21::test_json_empty_files", "tests/test_mysensors.py::TestGateway21::test_json_no_files", "tests/test_mysensors.py::TestGateway21::test_json_persistence", "tests/test_mysensors.py::TestGateway21::test_json_upgrade", "tests/test_mysensors.py::TestGateway21::test_logic_bad_message", "tests/test_mysensors.py::TestGateway21::test_non_presented_child", "tests/test_mysensors.py::TestGateway21::test_non_presented_sensor", "tests/test_mysensors.py::TestGateway21::test_persistence_at_init", "tests/test_mysensors.py::TestGateway21::test_pickle_empty_files", "tests/test_mysensors.py::TestGateway21::test_pickle_persistence", "tests/test_mysensors.py::TestGateway21::test_pickle_upgrade", "tests/test_mysensors.py::TestGateway21::test_present_humidity_sensor", "tests/test_mysensors.py::TestGateway21::test_present_light_level_sensor", "tests/test_mysensors.py::TestGateway21::test_present_same_child", "tests/test_mysensors.py::TestGateway21::test_present_to_non_sensor", "tests/test_mysensors.py::TestGateway21::test_presentation_arduino_node", "tests/test_mysensors.py::TestGateway21::test_req", "tests/test_mysensors.py::TestGateway21::test_req_notasensor", "tests/test_mysensors.py::TestGateway21::test_req_novalue", "tests/test_mysensors.py::TestGateway21::test_req_zerovalue", "tests/test_mysensors.py::TestGateway21::test_set_and_reboot", "tests/test_mysensors.py::TestGateway21::test_set_bad_battery_attribute", "tests/test_mysensors.py::TestGateway21::test_set_child_no_children", "tests/test_mysensors.py::TestGateway21::test_set_child_value", "tests/test_mysensors.py::TestGateway21::test_set_child_value_bad_ack", "tests/test_mysensors.py::TestGateway21::test_set_child_value_bad_type", "tests/test_mysensors.py::TestGateway21::test_set_child_value_no_sensor", "tests/test_mysensors.py::TestGateway21::test_set_child_value_value_type", "tests/test_mysensors.py::TestGateway21::test_set_forecast", "tests/test_mysensors.py::TestGateway21::test_set_humidity_level", "tests/test_mysensors.py::TestGateway21::test_set_light_level", "tests/test_mysensors.py::TestGateway21::test_set_position", "tests/test_mysensors.py::TestGateway21::test_set_with_new_state", "tests/test_mysensors.py::TestGateway21::test_smartsleep", "tests/test_mysensors.py::TestGateway21::test_smartsleep_from_unknown", "tests/test_mysensors.py::TestGateway22::test_bad_battery_level", "tests/test_mysensors.py::TestGateway22::test_bad_file_name", "tests/test_mysensors.py::TestGateway22::test_battery_level", "tests/test_mysensors.py::TestGateway22::test_callback", "tests/test_mysensors.py::TestGateway22::test_callback_exception", "tests/test_mysensors.py::TestGateway22::test_child_validate", "tests/test_mysensors.py::TestGateway22::test_discover_response_known", "tests/test_mysensors.py::TestGateway22::test_discover_response_unknown", "tests/test_mysensors.py::TestGateway22::test_id_request_with_node_zero", "tests/test_mysensors.py::TestGateway22::test_internal_config", "tests/test_mysensors.py::TestGateway22::test_internal_gateway_ready", "tests/test_mysensors.py::TestGateway22::test_internal_id_request", 
"tests/test_mysensors.py::TestGateway22::test_internal_log_message", "tests/test_mysensors.py::TestGateway22::test_internal_sketch_name", "tests/test_mysensors.py::TestGateway22::test_internal_sketch_version", "tests/test_mysensors.py::TestGateway22::test_internal_time", "tests/test_mysensors.py::TestGateway22::test_json_empty_file_good_bak", "tests/test_mysensors.py::TestGateway22::test_json_empty_files", "tests/test_mysensors.py::TestGateway22::test_json_no_files", "tests/test_mysensors.py::TestGateway22::test_json_persistence", "tests/test_mysensors.py::TestGateway22::test_json_upgrade", "tests/test_mysensors.py::TestGateway22::test_logic_bad_message", "tests/test_mysensors.py::TestGateway22::test_non_presented_child", "tests/test_mysensors.py::TestGateway22::test_non_presented_sensor", "tests/test_mysensors.py::TestGateway22::test_persistence_at_init", "tests/test_mysensors.py::TestGateway22::test_pickle_empty_files", "tests/test_mysensors.py::TestGateway22::test_pickle_persistence", "tests/test_mysensors.py::TestGateway22::test_pickle_upgrade", "tests/test_mysensors.py::TestGateway22::test_present_humidity_sensor", "tests/test_mysensors.py::TestGateway22::test_present_light_level_sensor", "tests/test_mysensors.py::TestGateway22::test_present_same_child", "tests/test_mysensors.py::TestGateway22::test_present_to_non_sensor", "tests/test_mysensors.py::TestGateway22::test_presentation_arduino_node", "tests/test_mysensors.py::TestGateway22::test_req", "tests/test_mysensors.py::TestGateway22::test_req_notasensor", "tests/test_mysensors.py::TestGateway22::test_req_novalue", "tests/test_mysensors.py::TestGateway22::test_req_zerovalue", "tests/test_mysensors.py::TestGateway22::test_set_and_reboot", "tests/test_mysensors.py::TestGateway22::test_set_bad_battery_attribute", "tests/test_mysensors.py::TestGateway22::test_set_child_no_children", "tests/test_mysensors.py::TestGateway22::test_set_child_value", "tests/test_mysensors.py::TestGateway22::test_set_child_value_bad_ack", "tests/test_mysensors.py::TestGateway22::test_set_child_value_bad_type", "tests/test_mysensors.py::TestGateway22::test_set_child_value_no_sensor", "tests/test_mysensors.py::TestGateway22::test_set_child_value_value_type", "tests/test_mysensors.py::TestGateway22::test_set_forecast", "tests/test_mysensors.py::TestGateway22::test_set_humidity_level", "tests/test_mysensors.py::TestGateway22::test_set_light_level", "tests/test_mysensors.py::TestGateway22::test_set_position", "tests/test_mysensors.py::test_gateway_bad_protocol", "tests/test_mysensors.py::test_gateway_low_protocol" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2018-02-26T19:06:22Z"
mit
theopenconversationkit__tock-py-9
diff --git a/tock/models.py b/tock/models.py index 67301f0..e3f7eda 100644 --- a/tock/models.py +++ b/tock/models.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- import abc +from dataclasses import dataclass from datetime import datetime from enum import Enum -from re import split from typing import List, Union, Optional from tock.intent import IntentName @@ -13,103 +13,84 @@ class PlayerType(Enum): BOT = "bot" +@dataclass class Entity: - - def __init__(self, type: str, role: str, evaluated: bool, new: bool, content: str = None, value: str = None): - self.type = type - self.role = role - self.content = content - self.value = value - self.evaluated = evaluated - self.sub_entities = [] - self.new = new + type: str + role: str + evaluated: bool + sub_entities = [] + new: bool + content: str = None + value: Optional[str] = None +@dataclass class Message: - - def __init__(self, type: str, text: str): - self.type = type - self.text = text + type: str + text: str +@dataclass class ConnectorType: - - def __init__(self, id: str, user_interface_type: str): - self.id = id - self.user_interface_type = user_interface_type + id: str + user_interface_type: str +@dataclass class UserId: - - def __init__(self, id: str, type: PlayerType, client_id: str = None): - self.id = id - self.type = type - self.client_id = client_id + id: str + type: PlayerType + client_id: Optional[str] = None +@dataclass class User: - - def __init__(self, timezone: str, locale: str, test: bool): - self.timezone = timezone - self.locale = locale - self.test = test + timezone: str + locale: str + test: bool +@dataclass class RequestContext: - - def __init__(self, - namespace: str, - language: str, - connector_type: ConnectorType, - user_interface: str, - application_id: str, - user_id: UserId, - bot_id: UserId, - user: User): - self.namespace = namespace - self.language = language - self.connector_type = connector_type - self.user_interface = user_interface - self.application_id = application_id - self.user_id = user_id - self.bot_id = bot_id - self.user = user + namespace: str + language: str + connector_type: ConnectorType + user_interface: str + application_id: str + user_id: UserId + bot_id: UserId + user: User +@dataclass class I18nText: - - def __init__(self, - text: str, - args: [], - to_be_translated: bool, - length: int, - key: Optional[str] = None - ): - self.text = text - self.args = args - self.to_be_translated = to_be_translated - self.length = length - self.key = key + text: str + args: [] + to_be_translated: bool + length: int + key: Optional[str] = None +@dataclass class Suggestion: - - def __init__(self, title: I18nText): - self.title = title + title: I18nText +@dataclass class BotMessage(abc.ABC): - - def __init__(self, delay: int = 0): - self.delay = delay + delay: int +@dataclass class Sentence(BotMessage): + text: I18nText + suggestions: List[Suggestion] + delay: int def __init__(self, text: I18nText, suggestions: List[Suggestion], - delay: int): + delay: int = 0): self.text = text self.suggestions = suggestions super().__init__(delay) @@ -168,28 +149,32 @@ class AttachmentType(Enum): FILE = "file" +@dataclass class Attachment: - - def __init__(self, url: str, type: Optional[AttachmentType]): - self.url = url - self.type = type + url: str + type: Optional[AttachmentType] +@dataclass class Action: - - def __init__(self, title: I18nText, url: Optional[str]): - self.title = title - self.url = url + title: I18nText + url: Optional[str] +@dataclass class Card(BotMessage): + title: Optional[I18nText] + sub_title: 
Optional[I18nText] + attachment: Optional[Attachment] + actions: List[Action] + delay: int def __init__(self, title: Optional[I18nText], sub_title: Optional[I18nText], attachment: Optional[Attachment], actions: List[Action], - delay: int): + delay: int = 0): self.title = title self.sub_title = sub_title self.attachment = attachment @@ -258,9 +243,12 @@ class Card(BotMessage): ) +@dataclass class Carousel(BotMessage): + cards: List[Card] + delay: int - def __init__(self, cards: List[Card], delay: int): + def __init__(self, cards: List[Card], delay: int = 0): self.cards = cards super().__init__(delay) @@ -281,38 +269,32 @@ class Carousel(BotMessage): ) +@dataclass class ResponseContext: - - def __init__(self, request_id: str, date: datetime): - self.request_id = request_id - self.date = date + request_id: str + date: datetime +@dataclass class BotRequest: - - def __init__(self, intent: IntentName, entities: List[Entity], message: Message, story_id: str, - request_context: RequestContext = None): - self.intent = intent - self.entities = entities - self.message = message - self.story_id = story_id - self.request_context = request_context + intent: IntentName + entities: List[Entity] + message: Message + story_id: str + request_context: RequestContext = None +@dataclass class BotResponse: - - def __init__(self, messages: List[BotMessage], story_id: str, step: str, context: ResponseContext, - entities: List[Entity]): - self.messages = messages - self.story_id = story_id - self.step = step - self.entities = entities - self.context = context + messages: List[BotMessage] + story_id: str + step: str + context: ResponseContext + entities: List[Entity] +@dataclass class TockMessage: - - def __init__(self, request_id: str, bot_request: BotRequest = None, bot_response: BotResponse = None): - self.bot_request = bot_request - self.bot_response = bot_response - self.request_id = request_id + request_id: str + bot_request: BotRequest = None + bot_response: BotResponse = None
theopenconversationkit/tock-py
896d1baf4cd66a414ad72ee081f3d1288dc01bf5
diff --git a/tock/tests/test_schemas.py b/tock/tests/test_schemas.py index f04c159..5006254 100644 --- a/tock/tests/test_schemas.py +++ b/tock/tests/test_schemas.py @@ -209,7 +209,7 @@ class TestEntitySchema(TestCase): expected = given_entity() schema = EntitySchema() result = schema.load(json.loads(schema.dumps(expected))) - compare(expected, result) + self.assertEqual(expected, result) class TestMessageSchema(TestCase): @@ -217,7 +217,7 @@ class TestMessageSchema(TestCase): expected = given_message() schema = MessageSchema() result: Message = schema.load(json.loads(schema.dumps(expected))) - compare(expected, result) + self.assertEqual(expected, result) class TestConnectorTypeSchema(TestCase): @@ -225,7 +225,7 @@ class TestConnectorTypeSchema(TestCase): expected = given_connector_type() schema = ConnectorTypeSchema() result: ConnectorType = schema.load(json.loads(schema.dumps(expected))) - compare(expected, result) + self.assertEqual(expected, result) class TestUserIdSchema(TestCase): @@ -233,7 +233,7 @@ class TestUserIdSchema(TestCase): expected = given_user_id() schema = UserIdSchema() result: UserId = schema.load(json.loads(schema.dumps(expected))) - compare(expected, result) + self.assertEqual(expected, result) class TestUserSchema(TestCase): @@ -241,7 +241,7 @@ class TestUserSchema(TestCase): expected = given_user() schema = UserSchema() result: User = schema.load(json.loads(schema.dumps(expected))) - compare(expected, result) + self.assertEqual(expected, result) class TestRequestContextSchema(TestCase): @@ -249,7 +249,7 @@ class TestRequestContextSchema(TestCase): expected = given_request_context() schema = RequestContextSchema() result: RequestContext = schema.load(json.loads(schema.dumps(expected))) - compare(expected, result) + self.assertEqual(expected, result) class TestSuggestionSchema(TestCase): @@ -257,7 +257,7 @@ class TestSuggestionSchema(TestCase): expected = given_suggestion() schema = SuggestionSchema() result: Suggestion = schema.load(json.loads(schema.dumps(expected))) - compare(expected, result) + self.assertEqual(expected, result) class TestI18nTextSchema(TestCase): @@ -265,7 +265,7 @@ class TestI18nTextSchema(TestCase): expected = given_i18n_text() schema = I18nTextSchema() result: I18nText = schema.load(json.loads(schema.dumps(expected))) - compare(expected, result) + self.assertEqual(expected, result) class TestSentenceSchema(TestCase): @@ -275,7 +275,7 @@ class TestSentenceSchema(TestCase): dumps = schema.dumps(expected) loads = json.loads(dumps) result: Sentence = schema.load(loads) - compare(expected, result) + self.assertEqual(expected, result) class TestAttachmentSchema(TestCase): @@ -285,7 +285,7 @@ class TestAttachmentSchema(TestCase): dumps = schema.dumps(expected) loads = json.loads(dumps) result: Attachment = schema.load(loads) - compare(expected, result) + self.assertEqual(expected, result) class TestActionSchema(TestCase): @@ -293,7 +293,7 @@ class TestActionSchema(TestCase): expected = given_action() schema = ActionSchema() result: Action = schema.load(json.loads(schema.dumps(expected))) - compare(expected, result) + self.assertEqual(expected, result) class TestCardSchema(TestCase): @@ -303,7 +303,7 @@ class TestCardSchema(TestCase): dumps = schema.dumps(expected) loads = json.loads(dumps) result: Card = schema.load(loads) - compare(expected, result) + self.assertEqual(expected, result) class TestCarouselSchema(TestCase): @@ -313,7 +313,7 @@ class TestCarouselSchema(TestCase): dumps = schema.dumps(expected) loads = json.loads(dumps) result: Carousel 
= schema.load(loads) - compare(expected, result) + self.assertEqual(expected, result) class TestResponseContextSchema(TestCase): @@ -323,7 +323,7 @@ class TestResponseContextSchema(TestCase): dumps = schema.dumps(expected) loads = json.loads(dumps) result: Sentence = schema.load(loads) - compare(expected, result) + self.assertEqual(expected, result) class TestBotRequestSchema(TestCase): @@ -333,7 +333,7 @@ class TestBotRequestSchema(TestCase): dumps = schema.dumps(expected) loads = json.loads(dumps) result: BotRequest = schema.load(loads) - compare(expected, result) + self.assertEqual(expected, result) class TestBotResponseSchema(TestCase): @@ -343,7 +343,7 @@ class TestBotResponseSchema(TestCase): dumps = schema.dumps(expected) loads = json.loads(dumps) result: BotResponse = schema.load(loads) - compare(expected, result) + self.assertEqual(expected, result) class TestTockMessageSchema(TestCase): @@ -351,7 +351,7 @@ class TestTockMessageSchema(TestCase): expected = given_tock_message() schema = TockMessageSchema() result: TockMessage = schema.load(json.loads(schema.dumps(expected))) - compare(expected, result) + self.assertEqual(expected, result) if __name__ == '__main__':
Using the `dataclass` decorator for building the model; cf. https://docs.python.org/3/library/dataclasses.html
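For readers unfamiliar with the pattern, a simplified before/after sketch of the conversion the patch performs, based on the `UserId` model from the diff (field types reduced to plain strings here for brevity):

```python
from dataclasses import dataclass
from typing import Optional

# Before: every field is assigned by hand in __init__, and there is no
# generated __eq__ or __repr__ (as in the original models).
class UserIdBefore:
    def __init__(self, id: str, type: str, client_id: str = None):
        self.id = id
        self.type = type
        self.client_id = client_id

# After: @dataclass derives __init__, __repr__ and __eq__ from the
# annotated fields; this is also why the accompanying test patch can
# switch from compare() to a plain assertEqual.
@dataclass
class UserId:
    id: str
    type: str
    client_id: Optional[str] = None

print(UserId("42", "user") == UserId("42", "user"))  # True via generated __eq__
```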
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tock/tests/test_schemas.py::TestEntitySchema::test_json_serialization", "tock/tests/test_schemas.py::TestMessageSchema::test_json_serialization", "tock/tests/test_schemas.py::TestConnectorTypeSchema::test_json_serialization", "tock/tests/test_schemas.py::TestUserIdSchema::test_json_serialization", "tock/tests/test_schemas.py::TestUserSchema::test_json_serialization", "tock/tests/test_schemas.py::TestRequestContextSchema::test_json_serialization", "tock/tests/test_schemas.py::TestSuggestionSchema::test_json_serialization", "tock/tests/test_schemas.py::TestI18nTextSchema::test_json_serialization", "tock/tests/test_schemas.py::TestSentenceSchema::test_json_serialization", "tock/tests/test_schemas.py::TestAttachmentSchema::test_json_serialization", "tock/tests/test_schemas.py::TestActionSchema::test_json_serialization", "tock/tests/test_schemas.py::TestCardSchema::test_json_serialization", "tock/tests/test_schemas.py::TestCarouselSchema::test_json_serialization", "tock/tests/test_schemas.py::TestResponseContextSchema::test_json_serialization", "tock/tests/test_schemas.py::TestBotRequestSchema::test_json_serialization", "tock/tests/test_schemas.py::TestBotResponseSchema::test_json_serialization", "tock/tests/test_schemas.py::TestTockMessageSchema::test_json_serialization" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-09-15T20:55:54Z"
mit
thesimj__envyaml-10
diff --git a/envyaml/envyaml.py b/envyaml/envyaml.py index d02118a..e0ba807 100644 --- a/envyaml/envyaml.py +++ b/envyaml/envyaml.py @@ -30,15 +30,21 @@ RE_COMMENTS = re.compile(r"(^#.*\n)", re.MULTILINE | re.UNICODE) RE_DOT_ENV = re.compile(r"^((?!\d)[\w\- ]+=.*)$", re.MULTILINE | re.UNICODE) RE_ENV = [ - (re.compile(r"(?<=\$\{)(.*?)(?=\})", re.MULTILINE | re.UNICODE), ["${{{match}}}"]), + ( + re.compile(r"(?<=\$\{)(.*?)(?=\})", re.MULTILINE | re.UNICODE), + ["${{{match}}}"] + ), ( re.compile(r"(?<=[\"\']\$)(.*?)(?=[\"\']$)", re.MULTILINE | re.UNICODE), ['"${match}"', "'${match}'"], ), - (re.compile(r"\$(?!\d)(.*)", re.MULTILINE | re.UNICODE), ["{match}"]), + ( + re.compile(r"\$(?!\d)(.*)(?<![\s\]])", re.MULTILINE | re.UNICODE), + ["{match}"] + ), ] -__version__ = "1.1.201202" +__version__ = "1.2.201222" class EnvYAML: @@ -138,8 +144,8 @@ class EnvYAML: name, value = line.strip().split("=", 1) # type: str,str # strip names and values - name = name.strip().strip("'\"") - value = value.strip().strip("'\"") + name = name.strip().strip("'\" ") + value = value.strip().strip("'\" ") # set config config[name] = value
thesimj/envyaml
7fa3fe20524bada64bad7db2008ee872a8ccde09
diff --git a/tests/env.test.yaml b/tests/env.test.yaml index 156ae33..7f97a5c 100644 --- a/tests/env.test.yaml +++ b/tests/env.test.yaml @@ -77,6 +77,9 @@ empty: novalues: noenvvalue: $EMPTY_ENV|"" +var_in_array: + to: [ $USERNAME ] + # # Comments # diff --git a/tests/test_envyaml.py b/tests/test_envyaml.py index dba827a..b45941f 100644 --- a/tests/test_envyaml.py +++ b/tests/test_envyaml.py @@ -284,3 +284,10 @@ def test_it_should_return_proper_formatted_string(): def test_it_should_raise_exception_in_strict_mode(): with pytest.raises(ValueError): EnvYAML("tests/env.ignored.yaml") + + +def test_it_should_parser_environment_inside_array_and_object(): + env = EnvYAML("tests/env.test.yaml", env_file="tests/test.env") + + # assert array + assert env['var_in_array.to.0'] == 'env-username'
Parsing fails with inline list I had the below structure in a config file and the parsing fails: ``` mail: to: [ $HANDLERS_SMTP_TO ] ``` I get the following error: `ValueError: Strict mode enabled, variable $SMTP_HANDLER_TO] not defined!` The below works just fine, though: ``` mail: to: - $HANDLERS_SMTP_TO ```
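To make the failure concrete, here is a minimal reproduction of the regex change in the patch above; without the negative lookbehind, the unquoted-variable pattern swallows the inline list's closing bracket, which is why a stray `]` shows up in the reported variable name:

```python
import re

line = "to: [ $HANDLERS_SMTP_TO ]"

# Old pattern: ".*" runs to the end of the line, so the trailing " ]" is
# captured as part of the variable name and the strict-mode lookup fails.
old = re.compile(r"\$(?!\d)(.*)", re.MULTILINE | re.UNICODE)
print(old.search(line).group(1))  # 'HANDLERS_SMTP_TO ]'

# New pattern: the lookbehind (?<![\s\]]) makes the match backtrack past
# trailing whitespace and "]", leaving a clean variable name.
new = re.compile(r"\$(?!\d)(.*)(?<![\s\]])", re.MULTILINE | re.UNICODE)
print(new.search(line).group(1))  # 'HANDLERS_SMTP_TO'
```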
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_envyaml.py::test_it_should_read_env_file", "tests/test_envyaml.py::test_it_should_read_custom_file", "tests/test_envyaml.py::test_it_should_get_default_values", "tests/test_envyaml.py::test_it_should_raise_key_error_when_no_values", "tests/test_envyaml.py::test_it_should_populate_env_variable", "tests/test_envyaml.py::test_it_should_return_dict_on_export", "tests/test_envyaml.py::test_it_should_convert_config_to_dict", "tests/test_envyaml.py::test_it_should_access_all_keys_in_config", "tests/test_envyaml.py::test_it_should_access_keys_and_lists", "tests/test_envyaml.py::test_it_should_read_config_from_env_variable", "tests/test_envyaml.py::test_it_should_use_default_value", "tests/test_envyaml.py::test_it_should_get_lists_values_by_number", "tests/test_envyaml.py::test_it_should_proper_handle_dollar_sign_with_number", "tests/test_envyaml.py::test_it_should_proper_complex_variable", "tests/test_envyaml.py::test_it_should_proper_complex_variable_2", "tests/test_envyaml.py::test_it_should_return_proper_formatted_string", "tests/test_envyaml.py::test_it_should_parser_environment_inside_array_and_object" ]
[ "tests/test_envyaml.py::test_it_should_return_default_value", "tests/test_envyaml.py::test_it_should_access_environment_variables", "tests/test_envyaml.py::test_it_should_fail_when_access_environment_variables", "tests/test_envyaml.py::test_it_should_access_environ", "tests/test_envyaml.py::test_it_should_read_default_file", "tests/test_envyaml.py::test_it_should_raise_exception_when_file_not_found", "tests/test_envyaml.py::test_it_should_not_fail_when_try_load_non_exist_default_file", "tests/test_envyaml.py::test_it_should_not_fail_when_try_load_default_empty_yaml_file", "tests/test_envyaml.py::test_it_should_not_fail_when_try_load_default_empty_dotenv_file", "tests/test_envyaml.py::test_it_should_be_valid_in_check", "tests/test_envyaml.py::test_it_should_be_read_if_strict_disabled", "tests/test_envyaml.py::test_it_should_raise_exception_in_strict_mode" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2020-12-22T21:23:55Z"
mit
theskumar__python-dotenv-148
diff --git a/dotenv/main.py b/dotenv/main.py index 349ec06..1a88238 100644 --- a/dotenv/main.py +++ b/dotenv/main.py @@ -2,47 +2,90 @@ from __future__ import absolute_import, print_function, unicode_literals import codecs -import fileinput import io import os import re +import shutil import sys from subprocess import Popen +import tempfile import warnings -from collections import OrderedDict +from collections import OrderedDict, namedtuple +from contextlib import contextmanager from .compat import StringIO, PY2, WIN, text_type -__escape_decoder = codecs.getdecoder('unicode_escape') -__posix_variable = re.compile('\$\{[^\}]*\}') # noqa +__posix_variable = re.compile(r'\$\{[^\}]*\}') +_binding = re.compile( + r""" + ( + \s* # leading whitespace + (?:export\s+)? # export -def decode_escaped(escaped): - return __escape_decoder(escaped)[0] + ( '[^']+' # single-quoted key + | [^=\#\s]+ # or unquoted key + )? + (?: + (?:\s*=\s*) # equal sign -def parse_line(line): - line = line.strip() + ( '(?:\\'|[^'])*' # single-quoted value + | "(?:\\"|[^"])*" # or double-quoted value + | [^\#\r\n]* # or unquoted value + ) + )? - # Ignore lines with `#` or which doesn't have `=` in it. - if not line or line.startswith('#') or '=' not in line: - return None, None + \s* # trailing whitespace + (?:\#[^\r\n]*)? # comment + (?:\r|\n|\r\n)? # newline + ) + """, + re.MULTILINE | re.VERBOSE, +) - k, v = line.split('=', 1) +_escape_sequence = re.compile(r"\\[\\'\"abfnrtv]") - if k.startswith('export '): - (_, _, k) = k.partition('export ') - # Remove any leading and trailing spaces in key, value - k, v = k.strip(), v.strip() +Binding = namedtuple('Binding', 'key value original') - if v: - v = v.encode('unicode-escape').decode('ascii') - quoted = v[0] == v[-1] in ['"', "'"] - if quoted: - v = decode_escaped(v[1:-1]) - return k, v +def decode_escapes(string): + def decode_match(match): + return codecs.decode(match.group(0), 'unicode-escape') + + return _escape_sequence.sub(decode_match, string) + + +def is_surrounded_by(string, char): + return ( + len(string) > 1 + and string[0] == string[-1] == char + ) + + +def parse_binding(string, position): + match = _binding.match(string, position) + (matched, key, value) = match.groups() + if key is None or value is None: + key = None + value = None + else: + value_quoted = is_surrounded_by(value, "'") or is_surrounded_by(value, '"') + if value_quoted: + value = decode_escapes(value[1:-1]) + else: + value = value.strip() + return (Binding(key=key, value=value, original=matched), match.end()) + + +def parse_stream(stream): + string = stream.read() + position = 0 + length = len(string) + while position < length: + (binding, position) = parse_binding(string, position) + yield binding class DotEnv(): @@ -52,19 +95,17 @@ class DotEnv(): self._dict = None self.verbose = verbose + @contextmanager def _get_stream(self): - self._is_file = False if isinstance(self.dotenv_path, StringIO): - return self.dotenv_path - - if os.path.isfile(self.dotenv_path): - self._is_file = True - return io.open(self.dotenv_path) - - if self.verbose: - warnings.warn("File doesn't exist {}".format(self.dotenv_path)) - - return StringIO('') + yield self.dotenv_path + elif os.path.isfile(self.dotenv_path): + with io.open(self.dotenv_path) as stream: + yield stream + else: + if self.verbose: + warnings.warn("File doesn't exist {}".format(self.dotenv_path)) + yield StringIO('') def dict(self): """Return dotenv as dict""" @@ -76,17 +117,10 @@ class DotEnv(): return self._dict def parse(self): - f = 
self._get_stream() - - for line in f: - key, value = parse_line(line) - if not key: - continue - - yield key, value - - if self._is_file: - f.close() + with self._get_stream() as stream: + for mapping in parse_stream(stream): + if mapping.key is not None and mapping.value is not None: + yield mapping.key, mapping.value def set_as_environment_variables(self, override=False): """ @@ -126,6 +160,20 @@ def get_key(dotenv_path, key_to_get): return DotEnv(dotenv_path, verbose=True).get(key_to_get) +@contextmanager +def rewrite(path): + try: + with tempfile.NamedTemporaryFile(mode="w+", delete=False) as dest: + with io.open(path) as source: + yield (source, dest) + except BaseException: + if os.path.isfile(dest.name): + os.unlink(dest.name) + raise + else: + shutil.move(dest.name, path) + + def set_key(dotenv_path, key_to_set, value_to_set, quote_mode="always"): """ Adds or Updates a key/value to the given .env @@ -141,20 +189,19 @@ def set_key(dotenv_path, key_to_set, value_to_set, quote_mode="always"): if " " in value_to_set: quote_mode = "always" - line_template = '{}="{}"' if quote_mode == "always" else '{}={}' + line_template = '{}="{}"\n' if quote_mode == "always" else '{}={}\n' line_out = line_template.format(key_to_set, value_to_set) - replaced = False - for line in fileinput.input(dotenv_path, inplace=True): - k, v = parse_line(line) - if k == key_to_set: - replaced = True - line = "{}\n".format(line_out) - print(line, end='') - - if not replaced: - with io.open(dotenv_path, "a") as f: - f.write("{}\n".format(line_out)) + with rewrite(dotenv_path) as (source, dest): + replaced = False + for mapping in parse_stream(source): + if mapping.key == key_to_set: + dest.write(line_out) + replaced = True + else: + dest.write(mapping.original) + if not replaced: + dest.write(line_out) return True, key_to_set, value_to_set @@ -166,18 +213,17 @@ def unset_key(dotenv_path, key_to_unset, quote_mode="always"): If the .env path given doesn't exist, fails If the given key doesn't exist in the .env, fails """ - removed = False - if not os.path.exists(dotenv_path): warnings.warn("can't delete from %s - it doesn't exist." % dotenv_path) return None, key_to_unset - for line in fileinput.input(dotenv_path, inplace=True): - k, v = parse_line(line) - if k == key_to_unset: - removed = True - line = '' - print(line, end='') + removed = False + with rewrite(dotenv_path) as (source, dest): + for mapping in parse_stream(source): + if mapping.key == key_to_unset: + removed = True + else: + dest.write(mapping.original) if not removed: warnings.warn("key %s not removed from %s - key doesn't exist." % (key_to_unset, dotenv_path))
theskumar/python-dotenv
3b7e60e6cbdef596701c1921257ea9e48076eec3
diff --git a/tests/test_cli.py b/tests/test_cli.py index 15c47af..b594592 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -1,12 +1,13 @@ # -*- coding: utf-8 -*- -from os import environ +import os from os.path import dirname, join +import pytest +import sh + import dotenv -from dotenv.version import __version__ from dotenv.cli import cli as dotenv_cli - -import sh +from dotenv.version import __version__ here = dirname(__file__) dotenv_path = join(here, '.env') @@ -38,6 +39,22 @@ def test_set_key(dotenv_file): with open(dotenv_file, 'r') as fp: assert 'HELLO="WORLD 2"\nfoo="bar"' == fp.read().strip() + success, key_to_set, value_to_set = dotenv.set_key(dotenv_file, "HELLO", "WORLD\n3") + + with open(dotenv_file, "r") as fp: + assert 'HELLO="WORLD\n3"\nfoo="bar"' == fp.read().strip() + + +def test_set_key_permission_error(dotenv_file): + os.chmod(dotenv_file, 0o000) + + with pytest.raises(Exception): + dotenv.set_key(dotenv_file, "HELLO", "WORLD") + + os.chmod(dotenv_file, 0o600) + with open(dotenv_file, "r") as fp: + assert fp.read() == "" + def test_list(cli, dotenv_file): success, key_to_set, value_to_set = dotenv.set_key(dotenv_file, 'HELLO', 'WORLD') @@ -59,6 +76,13 @@ def test_list_wo_file(cli): assert 'Invalid value for "-f"' in result.output +def test_empty_value(): + with open(dotenv_path, "w") as f: + f.write("TEST=") + assert dotenv.get_key(dotenv_path, "TEST") == "" + sh.rm(dotenv_path) + + def test_key_value_without_quotes(): with open(dotenv_path, 'w') as f: f.write("TEST = value \n") @@ -95,18 +119,41 @@ def test_value_with_special_characters(): sh.rm(dotenv_path) -def test_unset(): - sh.touch(dotenv_path) - success, key_to_set, value_to_set = dotenv.set_key(dotenv_path, 'HELLO', 'WORLD') - stored_value = dotenv.get_key(dotenv_path, 'HELLO') - assert stored_value == 'WORLD' - success, key_to_unset = dotenv.unset_key(dotenv_path, 'HELLO') - assert success is True - assert dotenv.get_key(dotenv_path, 'HELLO') is None - success, key_to_unset = dotenv.unset_key(dotenv_path, 'RANDOM') - assert success is None +def test_value_with_new_lines(): + with open(dotenv_path, 'w') as f: + f.write('TEST="a\nb"') + assert dotenv.get_key(dotenv_path, 'TEST') == "a\nb" + sh.rm(dotenv_path) + + with open(dotenv_path, 'w') as f: + f.write("TEST='a\nb'") + assert dotenv.get_key(dotenv_path, 'TEST') == "a\nb" + sh.rm(dotenv_path) + + +def test_value_after_comment(): + with open(dotenv_path, "w") as f: + f.write("# comment\nTEST=a") + assert dotenv.get_key(dotenv_path, "TEST") == "a" sh.rm(dotenv_path) - success, key_to_unset = dotenv.unset_key(dotenv_path, 'HELLO') + + +def test_unset_ok(dotenv_file): + with open(dotenv_file, "w") as f: + f.write("a=b\nc=d") + + success, key_to_unset = dotenv.unset_key(dotenv_file, "a") + + assert success is True + assert key_to_unset == "a" + with open(dotenv_file, "r") as f: + assert f.read() == "c=d" + sh.rm(dotenv_file) + + +def test_unset_non_existing_file(): + success, key_to_unset = dotenv.unset_key('/non-existing', 'HELLO') + assert success is None @@ -180,7 +227,7 @@ def test_get_key_with_interpolation(cli): stored_value = dotenv.get_key(dotenv_path, 'BAR') assert stored_value == 'CONCATENATED_WORLD_POSIX_VAR' # test replace from environ taking precedence over file - environ["HELLO"] = "TAKES_PRECEDENCE" + os.environ["HELLO"] = "TAKES_PRECEDENCE" stored_value = dotenv.get_key(dotenv_path, 'FOO') assert stored_value == "TAKES_PRECEDENCE" sh.rm(dotenv_path) @@ -194,10 +241,10 @@ def test_get_key_with_interpolation_of_unset_variable(cli): 
stored_value = dotenv.get_key(dotenv_path, 'FOO') assert stored_value == '' # unless present in environment - environ['NOT_SET'] = 'BAR' + os.environ['NOT_SET'] = 'BAR' stored_value = dotenv.get_key(dotenv_path, 'FOO') assert stored_value == 'BAR' - del(environ['NOT_SET']) + del(os.environ['NOT_SET']) sh.rm(dotenv_path) diff --git a/tests/test_core.py b/tests/test_core.py index 45a1f86..bda2e3b 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -9,7 +9,7 @@ import warnings import sh from dotenv import load_dotenv, find_dotenv, set_key, dotenv_values -from dotenv.main import parse_line +from dotenv.main import Binding, parse_stream from dotenv.compat import StringIO from IPython.terminal.embed import InteractiveShellEmbed @@ -25,21 +25,71 @@ def restore_os_environ(): @pytest.mark.parametrize("test_input,expected", [ - ("a=b", ("a", "b")), - (" a = b ", ("a", "b")), - ("export a=b", ("a", "b")), - (" export 'a'=b", ("'a'", "b")), - (" export 'a'=b", ("'a'", "b")), - ("# a=b", (None, None)), - ("# a=b", (None, None)), - ("a=b space ", ('a', 'b space')), - ("a='b space '", ('a', 'b space ')), - ('a="b space "', ('a', 'b space ')), - ("export export_spam=1", ("export_spam", "1")), - ("export port=8000", ("port", "8000")), + ("", []), + ("a=b", [Binding(key="a", value="b", original="a=b")]), + ("'a'=b", [Binding(key="'a'", value="b", original="'a'=b")]), + ("[=b", [Binding(key="[", value="b", original="[=b")]), + (" a = b ", [Binding(key="a", value="b", original=" a = b ")]), + ("export a=b", [Binding(key="a", value="b", original="export a=b")]), + (" export 'a'=b", [Binding(key="'a'", value="b", original=" export 'a'=b")]), + (" export 'a'=b", [Binding(key="'a'", value="b", original=" export 'a'=b")]), + ("# a=b", [Binding(key=None, value=None, original="# a=b")]), + ('a=b # comment', [Binding(key="a", value="b", original="a=b # comment")]), + ("a=b space ", [Binding(key="a", value="b space", original="a=b space ")]), + ("a='b space '", [Binding(key="a", value="b space ", original="a='b space '")]), + ('a="b space "', [Binding(key="a", value="b space ", original='a="b space "')]), + ("export export_a=1", [Binding(key="export_a", value="1", original="export export_a=1")]), + ("export port=8000", [Binding(key="port", value="8000", original="export port=8000")]), + ('a="b\nc"', [Binding(key="a", value="b\nc", original='a="b\nc"')]), + ("a='b\nc'", [Binding(key="a", value="b\nc", original="a='b\nc'")]), + ('a="b\nc"', [Binding(key="a", value="b\nc", original='a="b\nc"')]), + ('a="b\\nc"', [Binding(key="a", value='b\nc', original='a="b\\nc"')]), + ('a="b\\"c"', [Binding(key="a", value='b"c', original='a="b\\"c"')]), + ("a='b\\'c'", [Binding(key="a", value="b'c", original="a='b\\'c'")]), + ("a=à", [Binding(key="a", value="à", original="a=à")]), + ('a="à"', [Binding(key="a", value="à", original='a="à"')]), + ('garbage', [Binding(key=None, value=None, original="garbage")]), + ( + "a=b\nc=d", + [ + Binding(key="a", value="b", original="a=b\n"), + Binding(key="c", value="d", original="c=d"), + ], + ), + ( + "a=b\r\nc=d", + [ + Binding(key="a", value="b", original="a=b\r\n"), + Binding(key="c", value="d", original="c=d"), + ], + ), + ( + 'a="\nb=c', + [ + Binding(key="a", value='"', original='a="\n'), + Binding(key="b", value='c', original="b=c"), + ] + ), + ( + '# comment\na="b\nc"\nd=e\n', + [ + Binding(key=None, value=None, original="# comment\n"), + Binding(key="a", value="b\nc", original='a="b\nc"\n'), + Binding(key="d", value="e", original="d=e\n"), + ], + ), + ( + 'garbage[%$#\na=b', + [ + 
Binding(key=None, value=None, original="garbage[%$#\n"), + Binding(key="a", value="b", original='a=b'), + ], + ), ]) -def test_parse_line(test_input, expected): - assert parse_line(test_input) == expected +def test_parse_stream(test_input, expected): + result = parse_stream(StringIO(test_input)) + + assert list(result) == expected def test_warns_if_file_does_not_exist():
Cannot get multiline strings to work? According to the documentation `TEST="foo\nbar"` should produce a multiline environment variable; however, I cannot get it to work. What am I doing wrong? I have tried every variation I can think of. **.env** ``` TEST1="foo\nbar" TEST2=foo\nbar TEST3="foo bar" TEST4="foo\\nbar" TEST5=foo\\nbar TEST6=foo bar TEST7="foo\ bar" TEST8=foo\ bar ``` **test.py** ``` import os from os.path import join, dirname from dotenv import load_dotenv dotenv_path = join(dirname(__file__), '.env') load_dotenv(dotenv_path, verbose=True) tests = ( (key, value) for key, value in os.environ.items() if key.startswith('TEST')) for key, value in tests: print(key, value) ``` **output** ``` TEST1 foo\nbar TEST2 foo\\nbar TEST3 "foo TEST4 foo\\nbar TEST5 foo\\\\nbar TEST6 foo TEST7 "foo\\ TEST8 foo\\ ``` Trying with "real" environment variables through bash: ``` $ export TEST="foo bar" $ python -c "import os; print(os.environ['TEST'])" foo bar ``` **using** Darwin Kernel Version 15.6.0 Python 3.6.2 python-dotenv==0.7.1
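For context, the parser rewrite in the patch above resolves this by decoding recognized escape sequences only inside quoted values; a minimal sketch of that mechanism, using the `decode_escapes` helper from the diff:

```python
import codecs
import re

# Only a fixed set of escape sequences is decoded, and only for values
# that were quoted in the .env file; unquoted values stay literal.
_escape_sequence = re.compile(r"\\[\\'\"abfnrtv]")

def decode_escapes(string):
    return _escape_sequence.sub(
        lambda match: codecs.decode(match.group(0), "unicode-escape"),
        string,
    )

value = decode_escapes(r"foo\nbar")  # e.g. TEST1="foo\nbar" after quote stripping
print(value)       # prints "foo" and "bar" on separate lines
print(len(value))  # 7: the two characters "\n" collapsed into one newline
```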
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_cli.py::test_get_key", "tests/test_cli.py::test_set_key", "tests/test_cli.py::test_list", "tests/test_cli.py::test_get_cli", "tests/test_cli.py::test_empty_value", "tests/test_cli.py::test_key_value_without_quotes", "tests/test_cli.py::test_value_with_quotes", "tests/test_cli.py::test_value_with_special_characters", "tests/test_cli.py::test_value_with_new_lines", "tests/test_cli.py::test_value_after_comment", "tests/test_cli.py::test_unset_ok", "tests/test_cli.py::test_unset_non_existing_file", "tests/test_cli.py::test_unset_cli", "tests/test_cli.py::test_get_key_with_interpolation", "tests/test_cli.py::test_get_key_with_interpolation_of_unset_variable", "tests/test_cli.py::test_run_without_cmd", "tests/test_cli.py::test_run_with_invalid_cmd", "tests/test_cli.py::test_run_with_version", "tests/test_core.py::test_parse_stream[-expected0]", "tests/test_core.py::test_parse_stream[a=b-expected1]", "tests/test_core.py::test_parse_stream['a'=b-expected2]", "tests/test_core.py::test_parse_stream[[=b-expected3]", "tests/test_core.py::test_parse_stream[", "tests/test_core.py::test_parse_stream[export", "tests/test_core.py::test_parse_stream[#", "tests/test_core.py::test_parse_stream[a=b", "tests/test_core.py::test_parse_stream[a='b", "tests/test_core.py::test_parse_stream[a=\"b", "tests/test_core.py::test_parse_stream[a=\"b\\nc\"-expected15]", "tests/test_core.py::test_parse_stream[a='b\\nc'-expected16]", "tests/test_core.py::test_parse_stream[a=\"b\\nc\"-expected17]", "tests/test_core.py::test_parse_stream[a=\"b\\\\nc\"-expected18]", "tests/test_core.py::test_parse_stream[a=\"b\\\\\"c\"-expected19]", "tests/test_core.py::test_parse_stream[a='b\\\\'c'-expected20]", "tests/test_core.py::test_parse_stream[a=\\xe0-expected21]", "tests/test_core.py::test_parse_stream[a=\"\\xe0\"-expected22]", "tests/test_core.py::test_parse_stream[garbage-expected23]", "tests/test_core.py::test_parse_stream[a=b\\nc=d-expected24]", "tests/test_core.py::test_parse_stream[a=b\\r\\nc=d-expected25]", "tests/test_core.py::test_parse_stream[a=\"\\nb=c-expected26]", "tests/test_core.py::test_parse_stream[garbage[%$#\\na=b-expected28]", "tests/test_core.py::test_warns_if_file_does_not_exist", "tests/test_core.py::test_find_dotenv", "tests/test_core.py::test_load_dotenv", "tests/test_core.py::test_load_dotenv_override", "tests/test_core.py::test_load_dotenv_in_current_dir", "tests/test_core.py::test_ipython", "tests/test_core.py::test_ipython_override", "tests/test_core.py::test_dotenv_values_stream", "tests/test_core.py::test_dotenv_values_export", "tests/test_core.py::test_dotenv_empty_selfreferential_interpolation", "tests/test_core.py::test_dotenv_nonempty_selfreferential_interpolation" ]
[]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2018-10-28T14:54:09Z"
bsd-3-clause
theskumar__python-dotenv-158
diff --git a/dotenv/main.py b/dotenv/main.py index 1a88238..98b22ec 100644 --- a/dotenv/main.py +++ b/dotenv/main.py @@ -21,14 +21,14 @@ _binding = re.compile( r""" ( \s* # leading whitespace - (?:export\s+)? # export + (?:export{0}+)? # export ( '[^']+' # single-quoted key | [^=\#\s]+ # or unquoted key )? (?: - (?:\s*=\s*) # equal sign + (?:{0}*={0}*) # equal sign ( '(?:\\'|[^'])*' # single-quoted value | "(?:\\"|[^"])*" # or double-quoted value @@ -40,7 +40,7 @@ _binding = re.compile( (?:\#[^\r\n]*)? # comment (?:\r|\n|\r\n)? # newline ) - """, + """.format(r'[^\S\r\n]'), re.MULTILINE | re.VERBOSE, )
theskumar/python-dotenv
3daef30cbb392d0b57e70cf2e28814496c3cf5e9
diff --git a/tests/test_core.py b/tests/test_core.py index bda2e3b..f797600 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -63,6 +63,13 @@ def restore_os_environ(): Binding(key="c", value="d", original="c=d"), ], ), + ( + 'a=\nb=c', + [ + Binding(key="a", value='', original='a=\n'), + Binding(key="b", value='c', original="b=c"), + ] + ), ( 'a="\nb=c', [
Regression 0.9.1 -> 0.10.0: .env parsed incorrectly Hello! I have upgraded dotenv from 0.9.1 to 0.10 and now my config file is parsed incorrectly. Example file: ``` $ cat .env VAR_A= VAR_B=123 ``` Current behaviour on 0.10: ``` $ dotenv list VAR_A=VAR_B=123 ``` Previous (correct) behaviour on 0.9: ``` $ dotenv list VAR_A= VAR_B=123 ```
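To illustrate the root cause and the fix from the patch above: `\s` also matches newlines, so with an empty value the binding regex ran straight onto the next line; the patch swaps in `[^\S\r\n]`, i.e. whitespace except line breaks. A simplified sketch (not the library's full pattern):

```python
import re

content = "VAR_A=\nVAR_B=123"

# Buggy idea: "\s*" around "=" happily crosses the newline, so VAR_A
# absorbs the entire next line as its value.
buggy = re.match(r"(\w+)\s*=\s*([^\r\n]*)", content)
print(buggy.groups())  # ('VAR_A', 'VAR_B=123')

# Fixed idea: "[^\S\r\n]" is whitespace *except* line breaks, so the
# match stops at the newline and VAR_A keeps its empty value.
hws = r"[^\S\r\n]"
fixed = re.match(r"(\w+){0}*={0}*([^\r\n]*)".format(hws), content)
print(fixed.groups())  # ('VAR_A', '')
```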
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_core.py::test_parse_stream[a=\\nb=c-expected26]" ]
[ "tests/test_core.py::test_parse_stream[-expected0]", "tests/test_core.py::test_parse_stream[a=b-expected1]", "tests/test_core.py::test_parse_stream['a'=b-expected2]", "tests/test_core.py::test_parse_stream[[=b-expected3]", "tests/test_core.py::test_parse_stream[", "tests/test_core.py::test_parse_stream[export", "tests/test_core.py::test_parse_stream[#", "tests/test_core.py::test_parse_stream[a=b", "tests/test_core.py::test_parse_stream[a='b", "tests/test_core.py::test_parse_stream[a=\"b", "tests/test_core.py::test_parse_stream[a=\"b\\nc\"-expected15]", "tests/test_core.py::test_parse_stream[a='b\\nc'-expected16]", "tests/test_core.py::test_parse_stream[a=\"b\\nc\"-expected17]", "tests/test_core.py::test_parse_stream[a=\"b\\\\nc\"-expected18]", "tests/test_core.py::test_parse_stream[a=\"b\\\\\"c\"-expected19]", "tests/test_core.py::test_parse_stream[a='b\\\\'c'-expected20]", "tests/test_core.py::test_parse_stream[a=\\xe0-expected21]", "tests/test_core.py::test_parse_stream[a=\"\\xe0\"-expected22]", "tests/test_core.py::test_parse_stream[garbage-expected23]", "tests/test_core.py::test_parse_stream[a=b\\nc=d-expected24]", "tests/test_core.py::test_parse_stream[a=b\\r\\nc=d-expected25]", "tests/test_core.py::test_parse_stream[a=\"\\nb=c-expected27]", "tests/test_core.py::test_parse_stream[garbage[%$#\\na=b-expected29]", "tests/test_core.py::test_warns_if_file_does_not_exist", "tests/test_core.py::test_find_dotenv", "tests/test_core.py::test_load_dotenv", "tests/test_core.py::test_load_dotenv_override", "tests/test_core.py::test_load_dotenv_in_current_dir", "tests/test_core.py::test_ipython", "tests/test_core.py::test_ipython_override", "tests/test_core.py::test_dotenv_values_stream", "tests/test_core.py::test_dotenv_values_export", "tests/test_core.py::test_dotenv_empty_selfreferential_interpolation", "tests/test_core.py::test_dotenv_nonempty_selfreferential_interpolation" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2018-12-14T08:36:38Z"
bsd-3-clause
theskumar__python-dotenv-180
diff --git a/README.md b/README.md index 64fa64f..7ca6477 100644 --- a/README.md +++ b/README.md @@ -299,8 +299,9 @@ Changelog Unreleased ----- -- ... - +- Refactor parser to fix parsing inconsistencies ([@bbc2])([#170]). + - Interpret escapes as control characters only in double-quoted strings. + - Interpret `#` as start of comment only if preceded by whitespace. 0.10.2 ----- @@ -428,6 +429,7 @@ Unreleased [#172]: https://github.com/theskumar/python-dotenv/issues/172 [#121]: https://github.com/theskumar/python-dotenv/issues/121 [#176]: https://github.com/theskumar/python-dotenv/issues/176 +[#170]: https://github.com/theskumar/python-dotenv/issues/170 [@asyncee]: https://github.com/asyncee [@greyli]: https://github.com/greyli diff --git a/setup.cfg b/setup.cfg index 7f78459..f0847b3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -5,6 +5,9 @@ universal = 1 max-line-length = 120 exclude = .tox,.git,docs,venv,.venv +[mypy] +ignore_missing_imports = true + [metadata] description-file = README.rst diff --git a/src/dotenv/compat.py b/src/dotenv/compat.py index 99ffb39..1a14534 100644 --- a/src/dotenv/compat.py +++ b/src/dotenv/compat.py @@ -1,4 +1,5 @@ import sys +from typing import Text if sys.version_info >= (3, 0): from io import StringIO # noqa @@ -6,3 +7,17 @@ else: from StringIO import StringIO # noqa PY2 = sys.version_info[0] == 2 # type: bool + + +def to_text(string): + # type: (str) -> Text + """ + Make a string Unicode if it isn't already. + + This is useful for defining raw unicode strings because `ur"foo"` isn't valid in + Python 3. + """ + if PY2: + return string.decode("utf-8") + else: + return string diff --git a/src/dotenv/main.py b/src/dotenv/main.py index 0812282..5b619b1 100644 --- a/src/dotenv/main.py +++ b/src/dotenv/main.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals -import codecs import io import os import re @@ -9,13 +8,14 @@ import shutil import sys from subprocess import Popen import tempfile -from typing import (Any, Dict, Iterator, List, Match, NamedTuple, Optional, # noqa - Pattern, Union, TYPE_CHECKING, Text, IO, Tuple) # noqa +from typing import (Dict, Iterator, List, Match, Optional, # noqa + Pattern, Union, TYPE_CHECKING, Text, IO, Tuple) import warnings from collections import OrderedDict from contextlib import contextmanager from .compat import StringIO, PY2 +from .parser import parse_stream if TYPE_CHECKING: # pragma: no cover if sys.version_info >= (3, 6): @@ -30,84 +30,6 @@ if TYPE_CHECKING: # pragma: no cover __posix_variable = re.compile(r'\$\{[^\}]*\}') # type: Pattern[Text] -_binding = re.compile( - r""" - ( - \s* # leading whitespace - (?:export{0}+)? # export - - ( '[^']+' # single-quoted key - | [^=\#\s]+ # or unquoted key - )? - - (?: - (?:{0}*={0}*) # equal sign - - ( '(?:\\'|[^'])*' # single-quoted value - | "(?:\\"|[^"])*" # or double-quoted value - | [^\#\r\n]* # or unquoted value - ) - )? - - \s* # trailing whitespace - (?:\#[^\r\n]*)? # comment - (?:\r|\n|\r\n)? 
# newline - ) - """.format(r'[^\S\r\n]'), - re.MULTILINE | re.VERBOSE, -) # type: Pattern[Text] - -_escape_sequence = re.compile(r"\\[\\'\"abfnrtv]") # type: Pattern[Text] - - -Binding = NamedTuple("Binding", [("key", Optional[Text]), - ("value", Optional[Text]), - ("original", Text)]) - - -def decode_escapes(string): - # type: (Text) -> Text - def decode_match(match): - # type: (Match[Text]) -> Text - return codecs.decode(match.group(0), 'unicode-escape') # type: ignore - - return _escape_sequence.sub(decode_match, string) - - -def is_surrounded_by(string, char): - # type: (Text, Text) -> bool - return ( - len(string) > 1 - and string[0] == string[-1] == char - ) - - -def parse_binding(string, position): - # type: (Text, int) -> Tuple[Binding, int] - match = _binding.match(string, position) - assert match is not None - (matched, key, value) = match.groups() - if key is None or value is None: - key = None - value = None - else: - value_quoted = is_surrounded_by(value, "'") or is_surrounded_by(value, '"') - if value_quoted: - value = decode_escapes(value[1:-1]) - else: - value = value.strip() - return (Binding(key=key, value=value, original=matched), match.end()) - - -def parse_stream(stream): - # type:(IO[Text]) -> Iterator[Binding] - string = stream.read() - position = 0 - length = len(string) - while position < length: - (binding, position) = parse_binding(string, position) - yield binding - def to_env(text): # type: (Text) -> str diff --git a/src/dotenv/parser.py b/src/dotenv/parser.py new file mode 100644 index 0000000..b63cb3a --- /dev/null +++ b/src/dotenv/parser.py @@ -0,0 +1,147 @@ +import codecs +import re +from typing import (IO, Iterator, Match, NamedTuple, Optional, Pattern, # noqa + Sequence, Text) + +from .compat import to_text + + +def make_regex(string, extra_flags=0): + # type: (str, int) -> Pattern[Text] + return re.compile(to_text(string), re.UNICODE | extra_flags) + + +_whitespace = make_regex(r"\s*", extra_flags=re.MULTILINE) +_export = make_regex(r"(?:export[^\S\r\n]+)?") +_single_quoted_key = make_regex(r"'([^']+)'") +_unquoted_key = make_regex(r"([^=\#\s]+)") +_equal_sign = make_regex(r"[^\S\r\n]*=[^\S\r\n]*") +_single_quoted_value = make_regex(r"'((?:\\'|[^'])*)'") +_double_quoted_value = make_regex(r'"((?:\\"|[^"])*)"') +_unquoted_value_part = make_regex(r"([^ \r\n]*)") +_comment = make_regex(r"(?:\s*#[^\r\n]*)?") +_end_of_line = make_regex(r"[^\S\r\n]*(?:\r\n|\n|\r)?") +_rest_of_line = make_regex(r"[^\r\n]*(?:\r|\n|\r\n)?") +_double_quote_escapes = make_regex(r"\\[\\'\"abfnrtv]") +_single_quote_escapes = make_regex(r"\\[\\']") + +Binding = NamedTuple("Binding", [("key", Optional[Text]), + ("value", Optional[Text]), + ("original", Text)]) + + +class Error(Exception): + pass + + +class Reader: + def __init__(self, stream): + # type: (IO[Text]) -> None + self.string = stream.read() + self.position = 0 + self.mark = 0 + + def has_next(self): + # type: () -> bool + return self.position < len(self.string) + + def set_mark(self): + # type: () -> None + self.mark = self.position + + def get_marked(self): + # type: () -> Text + return self.string[self.mark:self.position] + + def peek(self, count): + # type: (int) -> Text + return self.string[self.position:self.position + count] + + def read(self, count): + # type: (int) -> Text + result = self.string[self.position:self.position + count] + if len(result) < count: + raise Error("read: End of string") + self.position += count + return result + + def read_regex(self, regex): + # type: (Pattern[Text]) -> Sequence[Text] + match 
= regex.match(self.string, self.position) + if match is None: + raise Error("read_regex: Pattern not found") + self.position = match.end() + return match.groups() + + +def decode_escapes(regex, string): + # type: (Pattern[Text], Text) -> Text + def decode_match(match): + # type: (Match[Text]) -> Text + return codecs.decode(match.group(0), 'unicode-escape') # type: ignore + + return regex.sub(decode_match, string) + + +def parse_key(reader): + # type: (Reader) -> Text + char = reader.peek(1) + if char == "'": + (key,) = reader.read_regex(_single_quoted_key) + else: + (key,) = reader.read_regex(_unquoted_key) + return key + + +def parse_unquoted_value(reader): + # type: (Reader) -> Text + value = u"" + while True: + (part,) = reader.read_regex(_unquoted_value_part) + value += part + after = reader.peek(2) + if len(after) < 2 or after[0] in u"\r\n" or after[1] in u" #\r\n": + return value + value += reader.read(2) + + +def parse_value(reader): + # type: (Reader) -> Text + char = reader.peek(1) + if char == u"'": + (value,) = reader.read_regex(_single_quoted_value) + return decode_escapes(_single_quote_escapes, value) + elif char == u'"': + (value,) = reader.read_regex(_double_quoted_value) + return decode_escapes(_double_quote_escapes, value) + elif char in (u"", u"\n", u"\r"): + return u"" + else: + return parse_unquoted_value(reader) + + +def parse_binding(reader): + # type: (Reader) -> Binding + reader.set_mark() + try: + reader.read_regex(_whitespace) + reader.read_regex(_export) + key = parse_key(reader) + reader.read_regex(_equal_sign) + value = parse_value(reader) + reader.read_regex(_comment) + reader.read_regex(_end_of_line) + return Binding(key=key, value=value, original=reader.get_marked()) + except Error: + reader.read_regex(_rest_of_line) + return Binding(key=None, value=None, original=reader.get_marked()) + + +def parse_stream(stream): + # type:(IO[Text]) -> Iterator[Binding] + reader = Reader(stream) + while reader.has_next(): + try: + yield parse_binding(reader) + except Error: + return diff --git a/tox.ini b/tox.ini index 56c8732..077780f 100644 --- a/tox.ini +++ b/tox.ini @@ -19,11 +19,11 @@ deps = mypy commands = flake8 src tests - mypy --python-version=3.7 src - mypy --python-version=3.6 src - mypy --python-version=3.5 src - mypy --python-version=3.4 src - mypy --python-version=2.7 src + mypy --python-version=3.7 src tests + mypy --python-version=3.6 src tests + mypy --python-version=3.5 src tests + mypy --python-version=3.4 src tests + mypy --python-version=2.7 src tests [testenv:manifest] deps = check-manifest
theskumar/python-dotenv
73124de45fcc21010ed9e90d4e5f576ba018496b
diff --git a/tests/test_core.py b/tests/test_core.py index daf0f59..349c58b 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -13,7 +13,6 @@ from IPython.terminal.embed import InteractiveShellEmbed from dotenv import dotenv_values, find_dotenv, load_dotenv, set_key from dotenv.compat import PY2, StringIO -from dotenv.main import Binding, parse_stream @contextlib.contextmanager @@ -26,81 +25,6 @@ def restore_os_environ(): os.environ.update(environ) [email protected]("test_input,expected", [ - ("", []), - ("a=b", [Binding(key="a", value="b", original="a=b")]), - ("'a'=b", [Binding(key="'a'", value="b", original="'a'=b")]), - ("[=b", [Binding(key="[", value="b", original="[=b")]), - (" a = b ", [Binding(key="a", value="b", original=" a = b ")]), - ("export a=b", [Binding(key="a", value="b", original="export a=b")]), - (" export 'a'=b", [Binding(key="'a'", value="b", original=" export 'a'=b")]), - (" export 'a'=b", [Binding(key="'a'", value="b", original=" export 'a'=b")]), - ("# a=b", [Binding(key=None, value=None, original="# a=b")]), - ('a=b # comment', [Binding(key="a", value="b", original="a=b # comment")]), - ("a=b space ", [Binding(key="a", value="b space", original="a=b space ")]), - ("a='b space '", [Binding(key="a", value="b space ", original="a='b space '")]), - ('a="b space "', [Binding(key="a", value="b space ", original='a="b space "')]), - ("export export_a=1", [Binding(key="export_a", value="1", original="export export_a=1")]), - ("export port=8000", [Binding(key="port", value="8000", original="export port=8000")]), - ('a="b\nc"', [Binding(key="a", value="b\nc", original='a="b\nc"')]), - ("a='b\nc'", [Binding(key="a", value="b\nc", original="a='b\nc'")]), - ('a="b\nc"', [Binding(key="a", value="b\nc", original='a="b\nc"')]), - ('a="b\\nc"', [Binding(key="a", value='b\nc', original='a="b\\nc"')]), - ('a="b\\"c"', [Binding(key="a", value='b"c', original='a="b\\"c"')]), - ("a='b\\'c'", [Binding(key="a", value="b'c", original="a='b\\'c'")]), - ("a=à", [Binding(key="a", value="à", original="a=à")]), - ('a="à"', [Binding(key="a", value="à", original='a="à"')]), - ('garbage', [Binding(key=None, value=None, original="garbage")]), - ( - "a=b\nc=d", - [ - Binding(key="a", value="b", original="a=b\n"), - Binding(key="c", value="d", original="c=d"), - ], - ), - ( - "a=b\r\nc=d", - [ - Binding(key="a", value="b", original="a=b\r\n"), - Binding(key="c", value="d", original="c=d"), - ], - ), - ( - 'a=\nb=c', - [ - Binding(key="a", value='', original='a=\n'), - Binding(key="b", value='c', original="b=c"), - ] - ), - ( - 'a="\nb=c', - [ - Binding(key="a", value='"', original='a="\n'), - Binding(key="b", value='c', original="b=c"), - ] - ), - ( - '# comment\na="b\nc"\nd=e\n', - [ - Binding(key=None, value=None, original="# comment\n"), - Binding(key="a", value="b\nc", original='a="b\nc"\n'), - Binding(key="d", value="e", original="d=e\n"), - ], - ), - ( - 'garbage[%$#\na=b', - [ - Binding(key=None, value=None, original="garbage[%$#\n"), - Binding(key="a", value="b", original='a=b'), - ], - ), -]) -def test_parse_stream(test_input, expected): - result = parse_stream(StringIO(test_input)) - - assert list(result) == expected - - def test_warns_if_file_does_not_exist(): with warnings.catch_warnings(record=True) as w: load_dotenv('.does_not_exist', verbose=True) diff --git a/tests/test_parser.py b/tests/test_parser.py new file mode 100644 index 0000000..f191f90 --- /dev/null +++ b/tests/test_parser.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +import pytest + +from dotenv.compat import 
StringIO +from dotenv.parser import Binding, parse_stream + + [email protected]("test_input,expected", [ + (u"", []), + (u"a=b", [Binding(key=u"a", value=u"b", original=u"a=b")]), + (u"'a'=b", [Binding(key=u"a", value=u"b", original=u"'a'=b")]), + (u"[=b", [Binding(key=u"[", value=u"b", original=u"[=b")]), + (u" a = b ", [Binding(key=u"a", value=u"b", original=u" a = b ")]), + (u"export a=b", [Binding(key=u"a", value=u"b", original=u"export a=b")]), + (u" export 'a'=b", [Binding(key=u"a", value=u"b", original=u" export 'a'=b")]), + (u"# a=b", [Binding(key=None, value=None, original=u"# a=b")]), + (u"a=b#c", [Binding(key=u"a", value=u"b#c", original=u"a=b#c")]), + (u'a=b # comment', [Binding(key=u"a", value=u"b", original=u"a=b # comment")]), + (u"a=b space ", [Binding(key=u"a", value=u"b space", original=u"a=b space ")]), + (u"a='b space '", [Binding(key=u"a", value=u"b space ", original=u"a='b space '")]), + (u'a="b space "', [Binding(key=u"a", value=u"b space ", original=u'a="b space "')]), + (u"export export_a=1", [Binding(key=u"export_a", value=u"1", original=u"export export_a=1")]), + (u"export port=8000", [Binding(key=u"port", value=u"8000", original=u"export port=8000")]), + (u'a="b\nc"', [Binding(key=u"a", value=u"b\nc", original=u'a="b\nc"')]), + (u"a='b\nc'", [Binding(key=u"a", value=u"b\nc", original=u"a='b\nc'")]), + (u'a="b\nc"', [Binding(key=u"a", value=u"b\nc", original=u'a="b\nc"')]), + (u'a="b\\nc"', [Binding(key=u"a", value=u'b\nc', original=u'a="b\\nc"')]), + (u"a='b\\nc'", [Binding(key=u"a", value=u'b\\nc', original=u"a='b\\nc'")]), + (u'a="b\\"c"', [Binding(key=u"a", value=u'b"c', original=u'a="b\\"c"')]), + (u"a='b\\'c'", [Binding(key=u"a", value=u"b'c", original=u"a='b\\'c'")]), + (u"a=à", [Binding(key=u"a", value=u"à", original=u"a=à")]), + (u'a="à"', [Binding(key=u"a", value=u"à", original=u'a="à"')]), + (u'garbage', [Binding(key=None, value=None, original=u"garbage")]), + ( + u"a=b\nc=d", + [ + Binding(key=u"a", value=u"b", original=u"a=b\n"), + Binding(key=u"c", value=u"d", original=u"c=d"), + ], + ), + ( + u"a=b\r\nc=d", + [ + Binding(key=u"a", value=u"b", original=u"a=b\r\n"), + Binding(key=u"c", value=u"d", original=u"c=d"), + ], + ), + ( + u'a=\nb=c', + [ + Binding(key=u"a", value=u'', original=u'a=\n'), + Binding(key=u"b", value=u'c', original=u"b=c"), + ] + ), + ( + u'a=b\n\nc=d', + [ + Binding(key=u"a", value=u"b", original=u"a=b\n"), + Binding(key=u"c", value=u"d", original=u"\nc=d"), + ] + ), + ( + u'a="\nb=c', + [ + Binding(key=None, value=None, original=u'a="\n'), + Binding(key=u"b", value=u"c", original=u"b=c"), + ] + ), + ( + u'# comment\na="b\nc"\nd=e\n', + [ + Binding(key=None, value=None, original=u"# comment\n"), + Binding(key=u"a", value=u"b\nc", original=u'a="b\nc"\n'), + Binding(key=u"d", value=u"e", original=u"d=e\n"), + ], + ), + ( + u'garbage[%$#\na=b', + [ + Binding(key=None, value=None, original=u"garbage[%$#\n"), + Binding(key=u"a", value=u"b", original=u'a=b'), + ], + ), +]) +def test_parse_stream(test_input, expected): + result = parse_stream(StringIO(test_input)) + + assert list(result) == expected
Breaking changes in 0.10.0 and future changes

As reported by several users in https://github.com/theskumar/python-dotenv/pull/148, version 0.10.0 changed how some .env files are parsed in a breaking way, especially with regard to escapes. I think some of those changes were necessary, but some others may have been unexpected.

## Experiment

It was unclear what exactly broke and whether Python-dotenv is consistent with other parsers, so I ran an experiment to compare versions and packages. The scripts to generate these tables are in [bbc2/dotenv-parser-comparisons](https://github.com/bbc2/dotenv-parser-comparisons). I may update them if I find new interesting behavior.

### Basic

```
foo=ab
```

| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `b` |
| python-dotenv-0.10.1 | `a` `b` |
| bash-5.0.0 | `a` `b` |
| js-dotenv-6.2.0 | `a` `b` |
| ruby-dotenv-2.6.0 | `a` `b` |

### Escaped `z`

```
foo=a\zb
```

| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `\` `\` `z` `b` |
| python-dotenv-0.10.1 | `a` `\` `z` `b` |
| bash-5.0.0 | `a` `z` `b` |
| js-dotenv-6.2.0 | `a` `\` `z` `b` |
| ruby-dotenv-2.6.0 | `a` `\` `z` `b` |

### Escaped and single-quoted `z`

```
foo='a\zb'
```

| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `\` `z` `b` |
| python-dotenv-0.10.1 | `a` `\` `z` `b` |
| bash-5.0.0 | `a` `\` `z` `b` |
| js-dotenv-6.2.0 | `a` `\` `z` `b` |
| ruby-dotenv-2.6.0 | `a` `\` `z` `b` |

### Escaped and double-quoted `z`

```
foo="a\zb"
```

| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `\` `z` `b` |
| python-dotenv-0.10.1 | `a` `\` `z` `b` |
| bash-5.0.0 | `a` `\` `z` `b` |
| js-dotenv-6.2.0 | `a` `\` `z` `b` |
| ruby-dotenv-2.6.0 | `a` `z` `b` |

### Escaped `n`

```
foo=a\nb
```

| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `\` `\` `n` `b` |
| python-dotenv-0.10.1 | `a` `\` `n` `b` |
| bash-5.0.0 | `a` `n` `b` |
| js-dotenv-6.2.0 | `a` `\` `n` `b` |
| ruby-dotenv-2.6.0 | `a` `\` `n` `b` |

### Escaped and single-quoted `n`

```
foo='a\nb'
```

| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `\` `n` `b` |
| python-dotenv-0.10.1 | `a` `\n` `b` |
| bash-5.0.0 | `a` `\` `n` `b` |
| js-dotenv-6.2.0 | `a` `\` `n` `b` |
| ruby-dotenv-2.6.0 | `a` `\` `n` `b` |

### Escaped and double-quoted `n`

```
foo="a\nb"
```

| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `\` `n` `b` |
| python-dotenv-0.10.1 | `a` `\n` `b` |
| bash-5.0.0 | `a` `\` `n` `b` |
| js-dotenv-6.2.0 | `a` `\n` `b` |
| ruby-dotenv-2.6.0 | `a` `\n` `b` |

### Quoted newline

```
foo="a
b"
```

| parser | output |
|--|--|
| python-dotenv-0.9.1 | `"` `a` |
| python-dotenv-0.10.1 | `a` `\n` `b` |
| bash-5.0.0 | `a` `\n` `b` |
| js-dotenv-6.2.0 | `a` |
| ruby-dotenv-2.6.0 | `a` `\n` `b` |

### Non-escaped space

```
foo=a b
```

| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` ` ` `b` |
| python-dotenv-0.10.1 | `a` ` ` `b` |
| bash-5.0.0 | `a` |
| js-dotenv-6.2.0 | `a` ` ` `b` |
| ruby-dotenv-2.6.0 | `a` ` ` `b` |

### Non-escaped `#`

```
foo=a#b
```

| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `#` `b` |
| python-dotenv-0.10.1 | `a` |
| bash-5.0.0 | `a` `#` `b` |
| js-dotenv-6.2.0 | `a` `#` `b` |
| ruby-dotenv-2.6.0 | `a` |

### Non-escaped spaced `#`

```
foo=a #b
```

| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` ` ` `#` `b` |
| python-dotenv-0.10.1 | `a` |
| bash-5.0.0 | `a` |
| js-dotenv-6.2.0 | `a` ` ` `#` `b` |
| ruby-dotenv-2.6.0 | `a` |

### Escaped `#`

```
foo="a#b"
```

| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `#` `b` |
| python-dotenv-0.10.1 | `a` `#` `b` |
| bash-5.0.0 | `a` `#` `b` |
| js-dotenv-6.2.0 | `a` `#` `b` |
| ruby-dotenv-2.6.0 | `a` `#` `b` |

### UTF-8

```
foo=é
```

| parser | output |
|--|--|
| python-dotenv-0.9.1 | `\` `x` `e` `9` |
| python-dotenv-0.10.1 | `é` |
| bash-5.0.0 | `é` |
| js-dotenv-6.2.0 | `é` |
| ruby-dotenv-2.6.0 | `é` |

### Quoted UTF-8

```
foo="é"
```

| parser | output |
|--|--|
| python-dotenv-0.9.1 | `é` |
| python-dotenv-0.10.1 | `é` |
| bash-5.0.0 | `é` |
| js-dotenv-6.2.0 | `é` |
| ruby-dotenv-2.6.0 | `é` |

## Conclusion

1. Non-quoted escapes (valid or invalid): All up-to-date parsers except Bash do the same thing. Bash removes the `\`. 0.9.1 added a `\`, which was obviously incorrect.
2. Single-quoted invalid escapes: Everything is fine.
3. Single-quoted valid escapes: 0.10.1 is the only parser that interprets them as control characters.
4. Double-quoted invalid escapes: It's fine except for Ruby.
5. Double-quoted valid escapes: All up-to-date parsers except Bash do the same thing. Bash and 0.9.1 keep the original characters instead of interpreting them as control characters.
6. Pound sign `#`: Interpreted as a comment delimiter since 0.10.0 if unquoted, even if there is no whitespace preceding it. When quoted or prefixed with whitespace, everything is fine except for JavaScript.
7. Non-quoted UTF-8: Fixed in 0.10.0. When quoted, everything is fine.
8. Non-escaped space: Only Bash ignores everything after it (or treats the rest as a command). Other parsers include everything until the end of the line.

My opinion:

* (2, 4, 7) are OK.
* (1, 5, 8) are where Bash differs from other parsers. It isn't obvious what we should do.
* (3, 6) are where python-dotenv 0.10.0 is quite obviously broken and should be fixed.
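A small slice of these tables can be reproduced with python-dotenv alone. The following is a minimal sketch, not one of the scripts from bbc2/dotenv-parser-comparisons, and it assumes a python-dotenv version whose `dotenv_values` accepts a `stream` argument (recent releases do):

```python
import io

from dotenv import dotenv_values

# A few of the inputs from the tables above; the Python string escapes
# produce the literal file contents (e.g. "foo=a\\zb" is `foo=a\zb`).
samples = [
    "foo=ab",       # Basic
    "foo=a\\zb",    # Escaped `z`
    "foo='a\\nb'",  # Escaped and single-quoted `n`
    'foo="a\\nb"',  # Escaped and double-quoted `n`
    "foo=a#b",      # Non-escaped `#`
]

for sample in samples:
    values = dotenv_values(stream=io.StringIO(sample))
    # Print each parsed character, mirroring the per-character table cells.
    print(sample, "->", list(values["foo"]))
```

Running this under two installed versions side by side is enough to see where the parsed characters diverge.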
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_core.py::test_warns_if_file_does_not_exist", "tests/test_core.py::test_find_dotenv", "tests/test_core.py::test_load_dotenv", "tests/test_core.py::test_load_dotenv_override", "tests/test_core.py::test_load_dotenv_in_current_dir", "tests/test_core.py::test_ipython", "tests/test_core.py::test_ipython_override", "tests/test_core.py::test_dotenv_values_stream", "tests/test_core.py::test_dotenv_values_export", "tests/test_core.py::test_dotenv_values_utf_8", "tests/test_core.py::test_dotenv_empty_selfreferential_interpolation", "tests/test_core.py::test_dotenv_nonempty_selfreferential_interpolation", "tests/test_parser.py::test_parse_stream[-expected0]", "tests/test_parser.py::test_parse_stream[a=b-expected1]", "tests/test_parser.py::test_parse_stream['a'=b-expected2]", "tests/test_parser.py::test_parse_stream[[=b-expected3]", "tests/test_parser.py::test_parse_stream[", "tests/test_parser.py::test_parse_stream[export", "tests/test_parser.py::test_parse_stream[#", "tests/test_parser.py::test_parse_stream[a=b#c-expected8]", "tests/test_parser.py::test_parse_stream[a=b", "tests/test_parser.py::test_parse_stream[a='b", "tests/test_parser.py::test_parse_stream[a=\"b", "tests/test_parser.py::test_parse_stream[a=\"b\\nc\"-expected15]", "tests/test_parser.py::test_parse_stream[a='b\\nc'-expected16]", "tests/test_parser.py::test_parse_stream[a=\"b\\nc\"-expected17]", "tests/test_parser.py::test_parse_stream[a=\"b\\\\nc\"-expected18]", "tests/test_parser.py::test_parse_stream[a='b\\\\nc'-expected19]", "tests/test_parser.py::test_parse_stream[a=\"b\\\\\"c\"-expected20]", "tests/test_parser.py::test_parse_stream[a='b\\\\'c'-expected21]", "tests/test_parser.py::test_parse_stream[a=\\xe0-expected22]", "tests/test_parser.py::test_parse_stream[a=\"\\xe0\"-expected23]", "tests/test_parser.py::test_parse_stream[garbage-expected24]", "tests/test_parser.py::test_parse_stream[a=b\\nc=d-expected25]", "tests/test_parser.py::test_parse_stream[a=b\\r\\nc=d-expected26]", "tests/test_parser.py::test_parse_stream[a=\\nb=c-expected27]", "tests/test_parser.py::test_parse_stream[a=b\\n\\nc=d-expected28]", "tests/test_parser.py::test_parse_stream[a=\"\\nb=c-expected29]", "tests/test_parser.py::test_parse_stream[garbage[%$#\\na=b-expected31]" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-05-21T21:56:18Z"
bsd-3-clause
theskumar__python-dotenv-236
diff --git a/.travis.yml b/.travis.yml index 4b1f886..b26433a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,6 @@ language: python cache: pip os: linux -dist: xenial jobs: include: @@ -21,10 +20,8 @@ jobs: env: TOXENV=py38 - python: "pypy" env: TOXENV=pypy - dist: trusty - python: "pypy3" env: TOXENV=pypy3 - dist: trusty install: - pip install tox diff --git a/CHANGELOG.md b/CHANGELOG.md index 00ce4dc..b693ba7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,10 @@ project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - Use current working directory to find `.env` when bundled by PyInstaller (#213 by [@gergelyk]). +### Fixed + +- Fix escaping of quoted values written by `set_key` (#236 by [@bbc2]). + ## [0.11.0] - 2020-02-07 ### Added diff --git a/src/dotenv/main.py b/src/dotenv/main.py index ce83155..93d617d 100644 --- a/src/dotenv/main.py +++ b/src/dotenv/main.py @@ -153,8 +153,11 @@ def set_key(dotenv_path, key_to_set, value_to_set, quote_mode="always"): if " " in value_to_set: quote_mode = "always" - line_template = '{}="{}"\n' if quote_mode == "always" else '{}={}\n' - line_out = line_template.format(key_to_set, value_to_set) + if quote_mode == "always": + value_out = '"{}"'.format(value_to_set.replace('"', '\\"')) + else: + value_out = value_to_set + line_out = "{}={}\n".format(key_to_set, value_out) with rewrite(dotenv_path) as (source, dest): replaced = False
theskumar/python-dotenv
ba5a16e0e349a6a1f26927c5f833a95c4c645647
diff --git a/tests/test_main.py b/tests/test_main.py index 3416e2c..a4fb5b4 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -29,14 +29,25 @@ def test_set_key_no_file(tmp_path): ) -def test_set_key_new(dotenv_file): [email protected]( + "key,value,expected,content", + [ + ("a", "", (True, "a", ""), 'a=""\n'), + ("a", "b", (True, "a", "b"), 'a="b"\n'), + ("a", "'b'", (True, "a", "b"), 'a="b"\n'), + ("a", "\"b\"", (True, "a", "b"), 'a="b"\n'), + ("a", "b'c", (True, "a", "b'c"), 'a="b\'c"\n'), + ("a", "b\"c", (True, "a", "b\"c"), 'a="b\\\"c"\n'), + ], +) +def test_set_key_new(dotenv_file, key, value, expected, content): logger = logging.getLogger("dotenv.main") with mock.patch.object(logger, "warning") as mock_warning: - result = dotenv.set_key(dotenv_file, "foo", "bar") + result = dotenv.set_key(dotenv_file, key, value) - assert result == (True, "foo", "bar") - assert open(dotenv_file, "r").read() == 'foo="bar"\n' + assert result == expected + assert open(dotenv_file, "r").read() == content mock_warning.assert_not_called()
set_key doesn't escape values with quotes in them

Suppose you have this code:

```python
import dotenv
import json

key = "KEY_TO_SAVE"
value = json.dumps({"test": "me"})
dotenv.set_key(".env", key, value)
```

What I would expect to see is a `.env` file that looks like this:

```
KEY_TO_SAVE="{\"test\": \"me\"}"
```

However, what I actually see is this:

```
KEY_TO_SAVE="{"test": "me"}"
```

Because the quote characters are not escaped, dotenv has effectively changed the value I was trying to set. In this case, the value passed to dotenv is valid JSON, but the value dotenv saves to the file is not valid JSON.
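For illustration, a minimal sketch of the round trip that proper escaping should guarantee, using the key and value from the report; before the escaping fix, the unescaped inner quotes break this round trip:

```python
import json
import pathlib

import dotenv

pathlib.Path(".env").touch()  # set_key expects the file to exist

value = json.dumps({"test": "me"})  # the literal string {"test": "me"}
dotenv.set_key(".env", "KEY_TO_SAVE", value)

# Reading the key back should return exactly the value that was written,
# so the stored string should still parse as JSON.
stored = dotenv.get_key(".env", "KEY_TO_SAVE")
assert json.loads(stored) == {"test": "me"}
```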
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_main.py::test_set_key_new[a-b\"c-expected5-a=\"b\\\\\"c\"\\n]" ]
[ "tests/test_main.py::test_set_key_no_file", "tests/test_main.py::test_set_key_new[a--expected0-a=\"\"\\n]", "tests/test_main.py::test_set_key_new[a-b-expected1-a=\"b\"\\n]", "tests/test_main.py::test_set_key_new[a-'b'-expected2-a=\"b\"\\n]", "tests/test_main.py::test_set_key_new[a-\"b\"-expected3-a=\"b\"\\n]", "tests/test_main.py::test_set_key_new[a-b'c-expected4-a=\"b'c\"\\n]", "tests/test_main.py::test_set_key_new_with_other_values", "tests/test_main.py::test_set_key_existing", "tests/test_main.py::test_set_key_existing_with_other_values", "tests/test_main.py::test_get_key_no_file", "tests/test_main.py::test_get_key_not_found", "tests/test_main.py::test_get_key_ok", "tests/test_main.py::test_get_key_none", "tests/test_main.py::test_unset_with_value", "tests/test_main.py::test_unset_no_value", "tests/test_main.py::test_unset_non_existent_file", "tests/test_main.py::test_find_dotenv_no_file_raise", "tests/test_main.py::test_find_dotenv_no_file_no_raise", "tests/test_main.py::test_find_dotenv_found", "tests/test_main.py::test_load_dotenv_existing_file", "tests/test_main.py::test_load_dotenv_no_file_verbose", "tests/test_main.py::test_load_dotenv_existing_variable_no_override", "tests/test_main.py::test_load_dotenv_existing_variable_override", "tests/test_main.py::test_load_dotenv_utf_8", "tests/test_main.py::test_load_dotenv_in_current_dir", "tests/test_main.py::test_dotenv_values_file", "tests/test_main.py::test_dotenv_values_stream[env0-a=$b-False-expected0]", "tests/test_main.py::test_dotenv_values_stream[env1-a=$b-True-expected1]", "tests/test_main.py::test_dotenv_values_stream[env2-a=${b}-False-expected2]", "tests/test_main.py::test_dotenv_values_stream[env3-a=${b}-True-expected3]", "tests/test_main.py::test_dotenv_values_stream[env4-b=c\\na=${b}-True-expected4]", "tests/test_main.py::test_dotenv_values_stream[env5-a=${b}-True-expected5]", "tests/test_main.py::test_dotenv_values_stream[env6-a=\"${b}\"-True-expected6]", "tests/test_main.py::test_dotenv_values_stream[env7-a='${b}'-True-expected7]", "tests/test_main.py::test_dotenv_values_stream[env8-a=${a}-True-expected8]", "tests/test_main.py::test_dotenv_values_stream[env9-a=${a}-True-expected9]" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-02-12T22:37:50Z"
bsd-3-clause
theskumar__python-dotenv-238
diff --git a/CHANGELOG.md b/CHANGELOG.md index 47163f3..8566a50 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - Fix escaping of quoted values written by `set_key` (#236 by [@bbc2]). - Fix `dotenv run` crashing on environment variables without values (#237 by [@yannham]). +- Remove warning when last line is empty (#238 by [@bbc2]). ## [0.11.0] - 2020-02-07 diff --git a/src/dotenv/parser.py b/src/dotenv/parser.py index 2904af8..2c93cbd 100644 --- a/src/dotenv/parser.py +++ b/src/dotenv/parser.py @@ -197,6 +197,13 @@ def parse_binding(reader): reader.set_mark() try: reader.read_regex(_multiline_whitespace) + if not reader.has_next(): + return Binding( + key=None, + value=None, + original=reader.get_marked(), + error=False, + ) reader.read_regex(_export) key = parse_key(reader) reader.read_regex(_whitespace)
theskumar/python-dotenv
2f58bccad26e0b728d32ec9bf9493671212dc24f
diff --git a/tests/test_main.py b/tests/test_main.py index a4fb5b4..d867858 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -30,63 +30,32 @@ def test_set_key_no_file(tmp_path): @pytest.mark.parametrize( - "key,value,expected,content", + "before,key,value,expected,after", [ - ("a", "", (True, "a", ""), 'a=""\n'), - ("a", "b", (True, "a", "b"), 'a="b"\n'), - ("a", "'b'", (True, "a", "b"), 'a="b"\n'), - ("a", "\"b\"", (True, "a", "b"), 'a="b"\n'), - ("a", "b'c", (True, "a", "b'c"), 'a="b\'c"\n'), - ("a", "b\"c", (True, "a", "b\"c"), 'a="b\\\"c"\n'), + ("", "a", "", (True, "a", ""), 'a=""\n'), + ("", "a", "b", (True, "a", "b"), 'a="b"\n'), + ("", "a", "'b'", (True, "a", "b"), 'a="b"\n'), + ("", "a", "\"b\"", (True, "a", "b"), 'a="b"\n'), + ("", "a", "b'c", (True, "a", "b'c"), 'a="b\'c"\n'), + ("", "a", "b\"c", (True, "a", "b\"c"), 'a="b\\\"c"\n'), + ("a=b", "a", "c", (True, "a", "c"), 'a="c"\n'), + ("a=b\n", "a", "c", (True, "a", "c"), 'a="c"\n'), + ("a=b\n\n", "a", "c", (True, "a", "c"), 'a="c"\n\n'), + ("a=b\nc=d", "a", "e", (True, "a", "e"), 'a="e"\nc=d'), + ("a=b\nc=d\ne=f", "c", "g", (True, "c", "g"), 'a=b\nc="g"\ne=f'), + ("a=b\n", "c", "d", (True, "c", "d"), 'a=b\nc="d"\n'), ], ) -def test_set_key_new(dotenv_file, key, value, expected, content): +def test_set_key(dotenv_file, before, key, value, expected, after): logger = logging.getLogger("dotenv.main") + with open(dotenv_file, "w") as f: + f.write(before) with mock.patch.object(logger, "warning") as mock_warning: result = dotenv.set_key(dotenv_file, key, value) assert result == expected - assert open(dotenv_file, "r").read() == content - mock_warning.assert_not_called() - - -def test_set_key_new_with_other_values(dotenv_file): - logger = logging.getLogger("dotenv.main") - with open(dotenv_file, "w") as f: - f.write("a=b\n") - - with mock.patch.object(logger, "warning") as mock_warning: - result = dotenv.set_key(dotenv_file, "foo", "bar") - - assert result == (True, "foo", "bar") - assert open(dotenv_file, "r").read() == 'a=b\nfoo="bar"\n' - mock_warning.assert_not_called() - - -def test_set_key_existing(dotenv_file): - logger = logging.getLogger("dotenv.main") - with open(dotenv_file, "w") as f: - f.write("foo=bar") - - with mock.patch.object(logger, "warning") as mock_warning: - result = dotenv.set_key(dotenv_file, "foo", "baz") - - assert result == (True, "foo", "baz") - assert open(dotenv_file, "r").read() == 'foo="baz"\n' - mock_warning.assert_not_called() - - -def test_set_key_existing_with_other_values(dotenv_file): - logger = logging.getLogger("dotenv.main") - with open(dotenv_file, "w") as f: - f.write("a=b\nfoo=bar\nc=d") - - with mock.patch.object(logger, "warning") as mock_warning: - result = dotenv.set_key(dotenv_file, "foo", "baz") - - assert result == (True, "foo", "baz") - assert open(dotenv_file, "r").read() == 'a=b\nfoo="baz"\nc=d' + assert open(dotenv_file, "r").read() == after mock_warning.assert_not_called() diff --git a/tests/test_parser.py b/tests/test_parser.py index dae51d3..f807513 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -86,6 +86,19 @@ from dotenv.parser import Binding, Original, parse_stream Binding(key=u"b", value=u'c', original=Original(string=u"b=c", line=2), error=False), ] ), + ( + u"\n\n", + [ + Binding(key=None, value=None, original=Original(string=u"\n\n", line=1), error=False), + ] + ), + ( + u"a=b\n\n", + [ + Binding(key=u"a", value=u"b", original=Original(string=u"a=b\n", line=1), error=False), + Binding(key=None, value=None, original=Original(string=u"\n", 
line=2), error=False), + ] + ), ( u'a=b\n\nc=d', [
Python-dotenv could not parse statement

Hi guys, I use dotenv in my Django app and ran into an issue when I run `runserver`:

> Python-dotenv could not parse statement starting at line 10

Django seems to work fine regardless, but the warning doesn't feel right to me. Here is my .env file:

```
DJANGO_SECRET_KEY="xxxxxxxxxxxxxxxxx"
DEBUG=True
DATABASE_DEFAULT_ENGINE="django.db.backends.postgresql"
DATABASE_DEFAULT_NAME="postgres"
DATABASE_DEFAULT_USER="postgres"
DATABASE_DEFAULT_PASSWORD="abc123"
DATABASE_DEFAULT_HOST="localhost"
DATABASE_DEFAULT_PORT="5432"
SENDGRID_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxx"
```

And I load the dotenv in wsgi.py:

```python
import os

from dotenv import load_dotenv
from django.core.wsgi import get_wsgi_application

load_dotenv()

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webapp.settings')

application = get_wsgi_application()
```

I even did it in manage.py:

```python
import os
import sys
from pathlib import Path

from dotenv import load_dotenv

env_path = Path('webapp') / '.env'
load_dotenv(dotenv_path=env_path)

# ... the rest as usual
```

Is this a bug or my own mistake?
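As context: the file above has nine assignments, so "line 10" is the trailing blank line, which the parser treated as an unparseable statement. A minimal sketch that reproduces the warning directly; `dotenv.parser` is an internal module, so this is illustrative only:

```python
import io

from dotenv.parser import parse_stream

# Nine valid assignments would behave the same; one assignment with a
# trailing blank line is enough to reproduce the report.
for binding in parse_stream(io.StringIO("a=b\n\n")):
    print(binding.original, binding.error)
# Before the fix, the trailing blank line comes back with error=True and
# load_dotenv logs "could not parse statement"; with the fix it is
# returned as a harmless blank binding with error=False.
```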
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a=\"c\"\\n\\n]", "tests/test_parser.py::test_parse_stream[\\n\\n-expected30]", "tests/test_parser.py::test_parse_stream[a=b\\n\\n-expected31]" ]
[ "tests/test_main.py::test_set_key_no_file", "tests/test_main.py::test_set_key[-a--expected0-a=\"\"\\n]", "tests/test_main.py::test_set_key[-a-b-expected1-a=\"b\"\\n]", "tests/test_main.py::test_set_key[-a-'b'-expected2-a=\"b\"\\n]", "tests/test_main.py::test_set_key[-a-\"b\"-expected3-a=\"b\"\\n]", "tests/test_main.py::test_set_key[-a-b'c-expected4-a=\"b'c\"\\n]", "tests/test_main.py::test_set_key[-a-b\"c-expected5-a=\"b\\\\\"c\"\\n]", "tests/test_main.py::test_set_key[a=b-a-c-expected6-a=\"c\"\\n]", "tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a=\"c\"\\n]", "tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a=\"e\"\\nc=d]", "tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc=\"g\"\\ne=f]", "tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc=\"d\"\\n]", "tests/test_main.py::test_get_key_no_file", "tests/test_main.py::test_get_key_not_found", "tests/test_main.py::test_get_key_ok", "tests/test_main.py::test_get_key_none", "tests/test_main.py::test_unset_with_value", "tests/test_main.py::test_unset_no_value", "tests/test_main.py::test_unset_non_existent_file", "tests/test_main.py::test_find_dotenv_no_file_raise", "tests/test_main.py::test_find_dotenv_no_file_no_raise", "tests/test_main.py::test_find_dotenv_found", "tests/test_main.py::test_load_dotenv_existing_file", "tests/test_main.py::test_load_dotenv_no_file_verbose", "tests/test_main.py::test_load_dotenv_existing_variable_no_override", "tests/test_main.py::test_load_dotenv_existing_variable_override", "tests/test_main.py::test_load_dotenv_utf_8", "tests/test_main.py::test_load_dotenv_in_current_dir", "tests/test_main.py::test_dotenv_values_file", "tests/test_main.py::test_dotenv_values_stream[env0-a=$b-False-expected0]", "tests/test_main.py::test_dotenv_values_stream[env1-a=$b-True-expected1]", "tests/test_main.py::test_dotenv_values_stream[env2-a=${b}-False-expected2]", "tests/test_main.py::test_dotenv_values_stream[env3-a=${b}-True-expected3]", "tests/test_main.py::test_dotenv_values_stream[env4-b=c\\na=${b}-True-expected4]", "tests/test_main.py::test_dotenv_values_stream[env5-a=${b}-True-expected5]", "tests/test_main.py::test_dotenv_values_stream[env6-a=\"${b}\"-True-expected6]", "tests/test_main.py::test_dotenv_values_stream[env7-a='${b}'-True-expected7]", "tests/test_main.py::test_dotenv_values_stream[env8-a=${a}-True-expected8]", "tests/test_main.py::test_dotenv_values_stream[env9-a=${a}-True-expected9]", "tests/test_parser.py::test_parse_stream[-expected0]", "tests/test_parser.py::test_parse_stream[a=b-expected1]", "tests/test_parser.py::test_parse_stream['a'=b-expected2]", "tests/test_parser.py::test_parse_stream[[=b-expected3]", "tests/test_parser.py::test_parse_stream[", "tests/test_parser.py::test_parse_stream[export", "tests/test_parser.py::test_parse_stream[#", "tests/test_parser.py::test_parse_stream[a=b#c-expected8]", "tests/test_parser.py::test_parse_stream[a=b", "tests/test_parser.py::test_parse_stream[a='b", "tests/test_parser.py::test_parse_stream[a=\"b", "tests/test_parser.py::test_parse_stream[a=\"b\\nc\"-expected15]", "tests/test_parser.py::test_parse_stream[a='b\\nc'-expected16]", "tests/test_parser.py::test_parse_stream[a=\"b\\nc\"-expected17]", "tests/test_parser.py::test_parse_stream[a=\"b\\\\nc\"-expected18]", "tests/test_parser.py::test_parse_stream[a='b\\\\nc'-expected19]", "tests/test_parser.py::test_parse_stream[a=\"b\\\\\"c\"-expected20]", "tests/test_parser.py::test_parse_stream[a='b\\\\'c'-expected21]", 
"tests/test_parser.py::test_parse_stream[a=\\xe0-expected22]", "tests/test_parser.py::test_parse_stream[a=\"\\xe0\"-expected23]", "tests/test_parser.py::test_parse_stream[no_value_var-expected24]", "tests/test_parser.py::test_parse_stream[a:", "tests/test_parser.py::test_parse_stream[a=b\\nc=d-expected26]", "tests/test_parser.py::test_parse_stream[a=b\\rc=d-expected27]", "tests/test_parser.py::test_parse_stream[a=b\\r\\nc=d-expected28]", "tests/test_parser.py::test_parse_stream[a=\\nb=c-expected29]", "tests/test_parser.py::test_parse_stream[a=b\\n\\nc=d-expected32]", "tests/test_parser.py::test_parse_stream[a=\"\\nb=c-expected33]", "tests/test_parser.py::test_parse_stream[a=b\\n#", "tests/test_parser.py::test_parse_stream[uglyKey[%$=\"S3cr3t_P4ssw#rD\"" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2020-02-21T22:46:26Z"
bsd-3-clause
theskumar__python-dotenv-260
diff --git a/CHANGELOG.md b/CHANGELOG.md index 116d97f..a01d3dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,9 @@ project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [Unreleased] -*No unreleased change at this time.* +### Fixed + +- Fix potentially empty expanded value for duplicate key (#260 by [@bbc]). ## [0.14.0] - 2020-07-03 diff --git a/src/dotenv/main.py b/src/dotenv/main.py index 8f77e83..607299a 100644 --- a/src/dotenv/main.py +++ b/src/dotenv/main.py @@ -18,7 +18,7 @@ logger = logging.getLogger(__name__) if IS_TYPE_CHECKING: from typing import ( - Dict, Iterator, Match, Optional, Pattern, Union, Text, IO, Tuple + Dict, Iterable, Iterator, Match, Optional, Pattern, Union, Text, IO, Tuple ) if sys.version_info >= (3, 6): _PathLike = os.PathLike @@ -83,9 +83,13 @@ class DotEnv(): if self._dict: return self._dict - values = OrderedDict(self.parse()) - self._dict = resolve_nested_variables(values) if self.interpolate else values - return self._dict + if self.interpolate: + values = resolve_nested_variables(self.parse()) + else: + values = OrderedDict(self.parse()) + + self._dict = values + return values def parse(self): # type: () -> Iterator[Tuple[Text, Optional[Text]]] @@ -211,7 +215,7 @@ def unset_key(dotenv_path, key_to_unset, quote_mode="always"): def resolve_nested_variables(values): - # type: (Dict[Text, Optional[Text]]) -> Dict[Text, Optional[Text]] + # type: (Iterable[Tuple[Text, Optional[Text]]]) -> Dict[Text, Optional[Text]] def _replacement(name, default): # type: (Text, Optional[Text]) -> Text default = default if default is not None else "" @@ -229,7 +233,7 @@ def resolve_nested_variables(values): new_values = {} - for k, v in values.items(): + for (k, v) in values: new_values[k] = __posix_variable.sub(_re_sub_callback, v) if v is not None else None return new_values
theskumar/python-dotenv
e4bbb8a2aa881409af6fb92933c18e2af6609da8
diff --git a/tests/test_main.py b/tests/test_main.py index 3a3d059..339d00b 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -337,6 +337,8 @@ def test_dotenv_values_file(dotenv_file): # Re-defined and used in file ({"b": "c"}, "b=d\na=${b}", True, {"a": "d", "b": "d"}), + ({}, "a=b\na=c\nd=${a}", True, {"a": "c", "d": "c"}), + ({}, "a=b\nc=${a}\nd=e\nc=${d}", True, {"a": "b", "c": "e", "d": "e"}), ], ) def test_dotenv_values_stream(env, string, interpolate, expected):
Unexpected Behavior when Parsing Duplicate Key

Hello. I started using this package yesterday and I am sorry if I am missing something simple. I am trying to read an env file that looks like this:

```
export hello=hi
export greetings=${hello}
export goodbye=bye
export greetings=${goodbye}
```

To read it, I wrote the following small program:

```python
import os

import dotenv

env_path = './test.env'
dotenv.load_dotenv(dotenv_path=env_path, verbose=True)
print(os.getenv('greetings'))
```

When I run this program, it outputs a single empty line. This happens even when I set `override` to `True`. I was expecting the program to output 'bye'. Is this a bug or the expected behavior for handling duplicate keys?

I am currently running Python 3.7.3 and python-dotenv version 0.10.3. Thank you.
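For reference, the behaviour the fix gives to this exact file, sketched with a stream instead of a file on disk; this assumes a python-dotenv release that includes the fix:

```python
import io

import dotenv

env = io.StringIO(
    "export hello=hi\n"
    "export greetings=${hello}\n"
    "export goodbye=bye\n"
    "export greetings=${goodbye}\n"
)

values = dotenv.dotenv_values(stream=env)
# Interpolation now resolves against the values defined so far in the
# file, so the last definition of `greetings` sees goodbye="bye".
print(values["greetings"])  # bye
```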
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_main.py::test_dotenv_values_stream[env19-a=b\\nc=${a}\\nd=e\\nc=${d}-True-expected19]" ]
[ "tests/test_main.py::test_set_key_no_file", "tests/test_main.py::test_set_key[-a--expected0-a=\"\"\\n]", "tests/test_main.py::test_set_key[-a-b-expected1-a=\"b\"\\n]", "tests/test_main.py::test_set_key[-a-'b'-expected2-a=\"b\"\\n]", "tests/test_main.py::test_set_key[-a-\"b\"-expected3-a=\"b\"\\n]", "tests/test_main.py::test_set_key[-a-b'c-expected4-a=\"b'c\"\\n]", "tests/test_main.py::test_set_key[-a-b\"c-expected5-a=\"b\\\\\"c\"\\n]", "tests/test_main.py::test_set_key[a=b-a-c-expected6-a=\"c\"\\n]", "tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a=\"c\"\\n]", "tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a=\"c\"\\n\\n]", "tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a=\"e\"\\nc=d]", "tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc=\"g\"\\ne=f]", "tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc=\"d\"\\n]", "tests/test_main.py::test_get_key_no_file", "tests/test_main.py::test_get_key_not_found", "tests/test_main.py::test_get_key_ok", "tests/test_main.py::test_get_key_none", "tests/test_main.py::test_unset_with_value", "tests/test_main.py::test_unset_no_value", "tests/test_main.py::test_unset_non_existent_file", "tests/test_main.py::test_find_dotenv_no_file_raise", "tests/test_main.py::test_find_dotenv_no_file_no_raise", "tests/test_main.py::test_find_dotenv_found", "tests/test_main.py::test_load_dotenv_existing_file", "tests/test_main.py::test_load_dotenv_no_file_verbose", "tests/test_main.py::test_load_dotenv_existing_variable_no_override", "tests/test_main.py::test_load_dotenv_existing_variable_override", "tests/test_main.py::test_load_dotenv_utf_8", "tests/test_main.py::test_load_dotenv_in_current_dir", "tests/test_main.py::test_dotenv_values_file", "tests/test_main.py::test_dotenv_values_stream[env0-a=$b-False-expected0]", "tests/test_main.py::test_dotenv_values_stream[env1-a=$b-True-expected1]", "tests/test_main.py::test_dotenv_values_stream[env2-a=${b}-False-expected2]", "tests/test_main.py::test_dotenv_values_stream[env3-a=${b}-True-expected3]", "tests/test_main.py::test_dotenv_values_stream[env4-a=${b:-d}-False-expected4]", "tests/test_main.py::test_dotenv_values_stream[env5-a=${b:-d}-True-expected5]", "tests/test_main.py::test_dotenv_values_stream[env6-b=c\\na=${b}-True-expected6]", "tests/test_main.py::test_dotenv_values_stream[env7-a=${b}-True-expected7]", "tests/test_main.py::test_dotenv_values_stream[env8-a=${b:-d}-True-expected8]", "tests/test_main.py::test_dotenv_values_stream[env9-a=\"${b}\"-True-expected9]", "tests/test_main.py::test_dotenv_values_stream[env10-a='${b}'-True-expected10]", "tests/test_main.py::test_dotenv_values_stream[env11-a=x${b}y-True-expected11]", "tests/test_main.py::test_dotenv_values_stream[env12-a=${a}-True-expected12]", "tests/test_main.py::test_dotenv_values_stream[env13-a=${a}-True-expected13]", "tests/test_main.py::test_dotenv_values_stream[env14-a=${a:-c}-True-expected14]", "tests/test_main.py::test_dotenv_values_stream[env15-a=${a:-c}-True-expected15]", "tests/test_main.py::test_dotenv_values_stream[env16-a=${b}${b}-True-expected16]", "tests/test_main.py::test_dotenv_values_stream[env17-b=d\\na=${b}-True-expected17]", "tests/test_main.py::test_dotenv_values_stream[env18-a=b\\na=c\\nd=${a}-True-expected18]" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-07-03T16:57:54Z"
bsd-3-clause
theskumar__python-dotenv-277
diff --git a/CHANGELOG.md b/CHANGELOG.md index 34cdb32..b5305f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,8 +19,10 @@ project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ### Fixed -- Fix potentially empty expanded value for duplicate key (#260 by [@bbc]). +- Fix potentially empty expanded value for duplicate key (#260 by [@bbc2]). - Fix import error on Python 3.5.0 and 3.5.1 (#267 by [@gongqingkui]). +- Fix parsing of unquoted values containing several adjacent space or tab characters + (#277 by [@bbc2], review by [@x-yuri]). ## [0.14.0] - 2020-07-03 @@ -226,6 +228,7 @@ project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). [@theskumar]: https://github.com/theskumar [@ulyssessouza]: https://github.com/ulyssessouza [@venthur]: https://github.com/venthur +[@x-yuri]: https://github.com/x-yuri [@yannham]: https://github.com/yannham [Unreleased]: https://github.com/theskumar/python-dotenv/compare/v0.14.0...HEAD diff --git a/src/dotenv/parser.py b/src/dotenv/parser.py index 4eba0ac..5cb1cdf 100644 --- a/src/dotenv/parser.py +++ b/src/dotenv/parser.py @@ -24,7 +24,7 @@ _unquoted_key = make_regex(r"([^=\#\s]+)") _equal_sign = make_regex(r"(=[^\S\r\n]*)") _single_quoted_value = make_regex(r"'((?:\\'|[^'])*)'") _double_quoted_value = make_regex(r'"((?:\\"|[^"])*)"') -_unquoted_value_part = make_regex(r"([^ \r\n]*)") +_unquoted_value = make_regex(r"([^\r\n]*)") _comment = make_regex(r"(?:[^\S\r\n]*#[^\r\n]*)?") _end_of_line = make_regex(r"[^\S\r\n]*(?:\r\n|\n|\r|$)") _rest_of_line = make_regex(r"[^\r\n]*(?:\r|\n|\r\n)?") @@ -167,14 +167,8 @@ def parse_key(reader): def parse_unquoted_value(reader): # type: (Reader) -> Text - value = u"" - while True: - (part,) = reader.read_regex(_unquoted_value_part) - value += part - after = reader.peek(2) - if len(after) < 2 or after[0] in u"\r\n" or after[1] in u" #\r\n": - return value - value += reader.read(2) + (part,) = reader.read_regex(_unquoted_value) + return re.sub(r"\s+#.*", "", part).rstrip() def parse_value(reader):
theskumar/python-dotenv
6ca2e2ab399c7e41be276bc830f21af3092f5d28
diff --git a/tests/test_parser.py b/tests/test_parser.py index f807513..48cecdc 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -19,20 +19,40 @@ from dotenv.parser import Binding, Original, parse_stream (u"# a=b", [Binding(key=None, value=None, original=Original(string=u"# a=b", line=1), error=False)]), (u"a=b#c", [Binding(key=u"a", value=u"b#c", original=Original(string=u"a=b#c", line=1), error=False)]), ( - u'a=b # comment', - [Binding(key=u"a", value=u"b", original=Original(string=u"a=b # comment", line=1), error=False)], + u'a=b #c', + [Binding(key=u"a", value=u"b", original=Original(string=u"a=b #c", line=1), error=False)], ), ( - u"a=b space ", - [Binding(key=u"a", value=u"b space", original=Original(string=u"a=b space ", line=1), error=False)], + u'a=b\t#c', + [Binding(key=u"a", value=u"b", original=Original(string=u"a=b\t#c", line=1), error=False)], ), ( - u"a='b space '", - [Binding(key=u"a", value=u"b space ", original=Original(string=u"a='b space '", line=1), error=False)], + u"a=b c", + [Binding(key=u"a", value=u"b c", original=Original(string=u"a=b c", line=1), error=False)], ), ( - u'a="b space "', - [Binding(key=u"a", value=u"b space ", original=Original(string=u'a="b space "', line=1), error=False)], + u"a=b\tc", + [Binding(key=u"a", value=u"b\tc", original=Original(string=u"a=b\tc", line=1), error=False)], + ), + ( + u"a=b c", + [Binding(key=u"a", value=u"b c", original=Original(string=u"a=b c", line=1), error=False)], + ), + ( + u"a=b\u00a0 c", + [Binding(key=u"a", value=u"b\u00a0 c", original=Original(string=u"a=b\u00a0 c", line=1), error=False)], + ), + ( + u"a=b c ", + [Binding(key=u"a", value=u"b c", original=Original(string=u"a=b c ", line=1), error=False)], + ), + ( + u"a='b c '", + [Binding(key=u"a", value=u"b c ", original=Original(string=u"a='b c '", line=1), error=False)], + ), + ( + u'a="b c "', + [Binding(key=u"a", value=u"b c ", original=Original(string=u'a="b c "', line=1), error=False)], ), ( u"export export_a=1",
Parsing rules are somewhat unexpected

`A=1 2` (one space) works, but `A=1  2` (two spaces) produces an error. I've run into it with `docker-compose`:

`.env`:

```yaml
A=1  2
```

`docker-compose.yml`:

```yaml
version: '3'
services:
  app:
    image: alpine:3.12
    command: /app/1.sh
    env_file: .env
    volumes:
      - .:/app
```

`1.sh`:

```sh
#!/bin/sh -eux
set | grep ^A
```

```
$ docker-compose up -d && docker-compose logs -f
WARNING: Python-dotenv could not parse statement starting at line 1
WARNING: Python-dotenv could not parse statement starting at line 1
WARNING: Python-dotenv could not parse statement starting at line 1
WARNING: Python-dotenv could not parse statement starting at line 1
Creating network "overriding-vars_default" with the default driver
Creating overriding-vars_app_1 ... done
WARNING: Python-dotenv could not parse statement starting at line 1
WARNING: Python-dotenv could not parse statement starting at line 1
Attaching to overriding-vars_app_1
app_1 | + set
app_1 | + grep ^A
overriding-vars_app_1 exited with code 1
```

With `A=1 2`:

```
$ docker-compose up -d && docker-compose logs -f
Recreating overriding-vars_app_1 ... done
Attaching to overriding-vars_app_1
app_1 | + set
app_1 | + grep ^A
app_1 | A='1 2'
overriding-vars_app_1 exited with code 0
```

I'll provide a Python test case, if need be.

https://github.com/theskumar/python-dotenv/blob/v0.14.0/src/dotenv/parser.py#L175-L176
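For reference, the behaviour after the fix: unquoted values parse cleanly and keep internal whitespace intact no matter how many adjacent spaces or tabs they contain. A minimal sketch, assuming a python-dotenv release that includes the fix:

```python
import io

import dotenv

one_space = dotenv.dotenv_values(stream=io.StringIO("A=1 2"))
two_spaces = dotenv.dotenv_values(stream=io.StringIO("A=1  2"))

print(dict(one_space))   # {'A': '1 2'}
print(dict(two_spaces))  # {'A': '1  2'}  (previously a parse warning, no value)
```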
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_parser.py::test_parse_stream[a=b", "tests/test_parser.py::test_parse_stream[a=b\\t#c-expected10]" ]
[ "tests/test_parser.py::test_parse_stream[-expected0]", "tests/test_parser.py::test_parse_stream[a=b-expected1]", "tests/test_parser.py::test_parse_stream['a'=b-expected2]", "tests/test_parser.py::test_parse_stream[[=b-expected3]", "tests/test_parser.py::test_parse_stream[", "tests/test_parser.py::test_parse_stream[export", "tests/test_parser.py::test_parse_stream[#", "tests/test_parser.py::test_parse_stream[a=b#c-expected8]", "tests/test_parser.py::test_parse_stream[a=b\\tc-expected12]", "tests/test_parser.py::test_parse_stream[a=b\\xa0", "tests/test_parser.py::test_parse_stream[a='b", "tests/test_parser.py::test_parse_stream[a=\"b", "tests/test_parser.py::test_parse_stream[a=\"b\\nc\"-expected20]", "tests/test_parser.py::test_parse_stream[a='b\\nc'-expected21]", "tests/test_parser.py::test_parse_stream[a=\"b\\nc\"-expected22]", "tests/test_parser.py::test_parse_stream[a=\"b\\\\nc\"-expected23]", "tests/test_parser.py::test_parse_stream[a='b\\\\nc'-expected24]", "tests/test_parser.py::test_parse_stream[a=\"b\\\\\"c\"-expected25]", "tests/test_parser.py::test_parse_stream[a='b\\\\'c'-expected26]", "tests/test_parser.py::test_parse_stream[a=\\xe0-expected27]", "tests/test_parser.py::test_parse_stream[a=\"\\xe0\"-expected28]", "tests/test_parser.py::test_parse_stream[no_value_var-expected29]", "tests/test_parser.py::test_parse_stream[a:", "tests/test_parser.py::test_parse_stream[a=b\\nc=d-expected31]", "tests/test_parser.py::test_parse_stream[a=b\\rc=d-expected32]", "tests/test_parser.py::test_parse_stream[a=b\\r\\nc=d-expected33]", "tests/test_parser.py::test_parse_stream[a=\\nb=c-expected34]", "tests/test_parser.py::test_parse_stream[\\n\\n-expected35]", "tests/test_parser.py::test_parse_stream[a=b\\n\\n-expected36]", "tests/test_parser.py::test_parse_stream[a=b\\n\\nc=d-expected37]", "tests/test_parser.py::test_parse_stream[a=\"\\nb=c-expected38]", "tests/test_parser.py::test_parse_stream[a=b\\n#", "tests/test_parser.py::test_parse_stream[uglyKey[%$=\"S3cr3t_P4ssw#rD\"" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-09-09T09:06:18Z"
bsd-3-clause
theskumar__python-dotenv-287
diff --git a/.travis.yml b/.travis.yml index 8f51de3..8ccd240 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,6 +33,7 @@ script: - tox before_install: + - pip install --upgrade pip - pip install coveralls after_success: diff --git a/CHANGELOG.md b/CHANGELOG.md index 56a7a94..effa251 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [Unreleased] -_There are no unreleased changes at this time._ +- Fix resolution order in variable expansion with `override=False` (#? by [@bbc2]). ## [0.15.0] - 2020-10-28 diff --git a/README.md b/README.md index 5c9aeaf..36f3b2b 100644 --- a/README.md +++ b/README.md @@ -41,13 +41,22 @@ export SECRET_KEY=YOURSECRETKEYGOESHERE Python-dotenv can interpolate variables using POSIX variable expansion. -The value of a variable is the first of the values defined in the following list: +With `load_dotenv(override=True)` or `dotenv_values()`, the value of a variable is the +first of the values defined in the following list: - Value of that variable in the `.env` file. - Value of that variable in the environment. - Default value, if provided. - Empty string. +With `load_dotenv(override=False)`, the value of a variable is the first of the values +defined in the following list: + +- Value of that variable in the environment. +- Value of that variable in the `.env` file. +- Default value, if provided. +- Empty string. + Ensure that variables are surrounded with `{}` like `${HOME}` as bare variables such as `$HOME` are not expanded. diff --git a/src/dotenv/main.py b/src/dotenv/main.py index 58a23f3..b366b18 100644 --- a/src/dotenv/main.py +++ b/src/dotenv/main.py @@ -4,7 +4,6 @@ from __future__ import absolute_import, print_function, unicode_literals import io import logging import os -import re import shutil import sys import tempfile @@ -13,13 +12,13 @@ from contextlib import contextmanager from .compat import IS_TYPE_CHECKING, PY2, StringIO, to_env from .parser import Binding, parse_stream +from .variables import parse_variables logger = logging.getLogger(__name__) if IS_TYPE_CHECKING: - from typing import ( - Dict, Iterable, Iterator, Match, Optional, Pattern, Union, Text, IO, Tuple - ) + from typing import (IO, Dict, Iterable, Iterator, Mapping, Optional, Text, + Tuple, Union) if sys.version_info >= (3, 6): _PathLike = os.PathLike else: @@ -30,18 +29,6 @@ if IS_TYPE_CHECKING: else: _StringIO = StringIO[Text] -__posix_variable = re.compile( - r""" - \$\{ - (?P<name>[^\}:]*) - (?::- - (?P<default>[^\}]*) - )? 
- \} - """, - re.VERBOSE, -) # type: Pattern[Text] - def with_warn_for_invalid_lines(mappings): # type: (Iterator[Binding]) -> Iterator[Binding] @@ -56,13 +43,14 @@ def with_warn_for_invalid_lines(mappings): class DotEnv(): - def __init__(self, dotenv_path, verbose=False, encoding=None, interpolate=True): - # type: (Union[Text, _PathLike, _StringIO], bool, Union[None, Text], bool) -> None + def __init__(self, dotenv_path, verbose=False, encoding=None, interpolate=True, override=True): + # type: (Union[Text, _PathLike, _StringIO], bool, Union[None, Text], bool, bool) -> None self.dotenv_path = dotenv_path # type: Union[Text,_PathLike, _StringIO] self._dict = None # type: Optional[Dict[Text, Optional[Text]]] self.verbose = verbose # type: bool self.encoding = encoding # type: Union[None, Text] self.interpolate = interpolate # type: bool + self.override = override # type: bool @contextmanager def _get_stream(self): @@ -83,13 +71,14 @@ class DotEnv(): if self._dict: return self._dict + raw_values = self.parse() + if self.interpolate: - values = resolve_nested_variables(self.parse()) + self._dict = OrderedDict(resolve_variables(raw_values, override=self.override)) else: - values = OrderedDict(self.parse()) + self._dict = OrderedDict(raw_values) - self._dict = values - return values + return self._dict def parse(self): # type: () -> Iterator[Tuple[Text, Optional[Text]]] @@ -98,13 +87,13 @@ class DotEnv(): if mapping.key is not None: yield mapping.key, mapping.value - def set_as_environment_variables(self, override=False): - # type: (bool) -> bool + def set_as_environment_variables(self): + # type: () -> bool """ Load the current dotenv as system environemt variable. """ for k, v in self.dict().items(): - if k in os.environ and not override: + if k in os.environ and not self.override: continue if v is not None: os.environ[to_env(k)] = to_env(v) @@ -217,27 +206,26 @@ def unset_key(dotenv_path, key_to_unset, quote_mode="always"): return removed, key_to_unset -def resolve_nested_variables(values): - # type: (Iterable[Tuple[Text, Optional[Text]]]) -> Dict[Text, Optional[Text]] - def _replacement(name, default): - # type: (Text, Optional[Text]) -> Text - default = default if default is not None else "" - ret = new_values.get(name, os.getenv(name, default)) - return ret # type: ignore +def resolve_variables(values, override): + # type: (Iterable[Tuple[Text, Optional[Text]]], bool) -> Mapping[Text, Optional[Text]] - def _re_sub_callback(match): - # type: (Match[Text]) -> Text - """ - From a match object gets the variable name and returns - the correct replacement - """ - matches = match.groupdict() - return _replacement(name=matches["name"], default=matches["default"]) # type: ignore + new_values = {} # type: Dict[Text, Optional[Text]] - new_values = {} + for (name, value) in values: + if value is None: + result = None + else: + atoms = parse_variables(value) + env = {} # type: Dict[Text, Optional[Text]] + if override: + env.update(os.environ) # type: ignore + env.update(new_values) + else: + env.update(new_values) + env.update(os.environ) # type: ignore + result = "".join(atom.resolve(env) for atom in atoms) - for (k, v) in values: - new_values[k] = __posix_variable.sub(_re_sub_callback, v) if v is not None else None + new_values[name] = result return new_values @@ -316,10 +304,11 @@ def load_dotenv(dotenv_path=None, stream=None, verbose=False, override=False, in Defaults to `False`. 
""" f = dotenv_path or stream or find_dotenv() - return DotEnv(f, verbose=verbose, interpolate=interpolate, **kwargs).set_as_environment_variables(override=override) + dotenv = DotEnv(f, verbose=verbose, interpolate=interpolate, override=override, **kwargs) + return dotenv.set_as_environment_variables() def dotenv_values(dotenv_path=None, stream=None, verbose=False, interpolate=True, **kwargs): # type: (Union[Text, _PathLike, None], Optional[_StringIO], bool, bool, Union[None, Text]) -> Dict[Text, Optional[Text]] # noqa: E501 f = dotenv_path or stream or find_dotenv() - return DotEnv(f, verbose=verbose, interpolate=interpolate, **kwargs).dict() + return DotEnv(f, verbose=verbose, interpolate=interpolate, override=True, **kwargs).dict() diff --git a/src/dotenv/variables.py b/src/dotenv/variables.py new file mode 100644 index 0000000..4828dfc --- /dev/null +++ b/src/dotenv/variables.py @@ -0,0 +1,106 @@ +import re +from abc import ABCMeta + +from .compat import IS_TYPE_CHECKING + +if IS_TYPE_CHECKING: + from typing import Iterator, Mapping, Optional, Pattern, Text + + +_posix_variable = re.compile( + r""" + \$\{ + (?P<name>[^\}:]*) + (?::- + (?P<default>[^\}]*) + )? + \} + """, + re.VERBOSE, +) # type: Pattern[Text] + + +class Atom(): + __metaclass__ = ABCMeta + + def __ne__(self, other): + # type: (object) -> bool + result = self.__eq__(other) + if result is NotImplemented: + return NotImplemented + return not result + + def resolve(self, env): + # type: (Mapping[Text, Optional[Text]]) -> Text + raise NotImplementedError + + +class Literal(Atom): + def __init__(self, value): + # type: (Text) -> None + self.value = value + + def __repr__(self): + # type: () -> str + return "Literal(value={})".format(self.value) + + def __eq__(self, other): + # type: (object) -> bool + if not isinstance(other, self.__class__): + return NotImplemented + return self.value == other.value + + def __hash__(self): + # type: () -> int + return hash((self.__class__, self.value)) + + def resolve(self, env): + # type: (Mapping[Text, Optional[Text]]) -> Text + return self.value + + +class Variable(Atom): + def __init__(self, name, default): + # type: (Text, Optional[Text]) -> None + self.name = name + self.default = default + + def __repr__(self): + # type: () -> str + return "Variable(name={}, default={})".format(self.name, self.default) + + def __eq__(self, other): + # type: (object) -> bool + if not isinstance(other, self.__class__): + return NotImplemented + return (self.name, self.default) == (other.name, other.default) + + def __hash__(self): + # type: () -> int + return hash((self.__class__, self.name, self.default)) + + def resolve(self, env): + # type: (Mapping[Text, Optional[Text]]) -> Text + default = self.default if self.default is not None else "" + result = env.get(self.name, default) + return result if result is not None else "" + + +def parse_variables(value): + # type: (Text) -> Iterator[Atom] + cursor = 0 + + for match in _posix_variable.finditer(value): + (start, end) = match.span() + name = match.groupdict()["name"] + default = match.groupdict()["default"] + + if start > cursor: + yield Literal(value=value[cursor:start]) + + yield Variable(name=name, default=default) + cursor = end + + length = len(value) + if cursor < length: + yield Literal(value=value[cursor:length])
theskumar/python-dotenv
e13d957bf48224453c5d9d9a7a83a13b999e0196
diff --git a/tests/test_main.py b/tests/test_main.py index 6b9458d..b927d7f 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -257,6 +257,28 @@ def test_load_dotenv_existing_variable_override(dotenv_file): assert os.environ == {"a": "b"} [email protected](os.environ, {"a": "c"}, clear=True) +def test_load_dotenv_redefine_var_used_in_file_no_override(dotenv_file): + with open(dotenv_file, "w") as f: + f.write('a=b\nd="${a}"') + + result = dotenv.load_dotenv(dotenv_file) + + assert result is True + assert os.environ == {"a": "c", "d": "c"} + + [email protected](os.environ, {"a": "c"}, clear=True) +def test_load_dotenv_redefine_var_used_in_file_with_override(dotenv_file): + with open(dotenv_file, "w") as f: + f.write('a=b\nd="${a}"') + + result = dotenv.load_dotenv(dotenv_file, override=True) + + assert result is True + assert os.environ == {"a": "b", "d": "b"} + + @mock.patch.dict(os.environ, {}, clear=True) def test_load_dotenv_utf_8(): stream = StringIO("a=à") diff --git a/tests/test_variables.py b/tests/test_variables.py new file mode 100644 index 0000000..86b0646 --- /dev/null +++ b/tests/test_variables.py @@ -0,0 +1,35 @@ +import pytest + +from dotenv.variables import Literal, Variable, parse_variables + + [email protected]( + "value,expected", + [ + ("", []), + ("a", [Literal(value="a")]), + ("${a}", [Variable(name="a", default=None)]), + ("${a:-b}", [Variable(name="a", default="b")]), + ( + "${a}${b}", + [ + Variable(name="a", default=None), + Variable(name="b", default=None), + ], + ), + ( + "a${b}c${d}e", + [ + Literal(value="a"), + Variable(name="b", default=None), + Literal(value="c"), + Variable(name="d", default=None), + Literal(value="e"), + ], + ), + ] +) +def test_parse_variables(value, expected): + result = parse_variables(value) + + assert list(result) == expected
Inconsistency in priorities of variable expansion

# Behavior

python-dotenv priorities are correctly documented, but inconsistent when loading a .env file with internal dependencies in variable expansion. Consider the following `.env` file:

```
HOSTNAME=localhost:8080
URL=http://${HOSTNAME}
```

Currently, if we set the `HOSTNAME` environment variable to `localhost:8081` and then run `load_dotenv()`, the environment ends up with `HOSTNAME=localhost:8081` and `URL=http://localhost:8080`, which at first is surprising.

# Underlying reason

The reason for this is that (by default) `load_dotenv()` does not override set environment variables, but when doing variable expansion inside a given file, `load_dotenv` prefers the variable as set in the file over the one from the environment (as documented in https://github.com/theskumar/python-dotenv#usages).

# Workaround

A workaround consists of changing the priorities in the file explicitly, as in this example:

```
HOSTNAME=${HOSTNAME:-localhost:8080}
URL=http://${HOSTNAME}
```

# Suggestion

I would suggest that the variable expansion priority logic follow the same behavior as the general "overriding or not" behavior. More precisely:

- when `override==False`, variable expansion should follow the following priorities:
  - Value of that variable in the environment.
  - Value of that variable in the .env file.
  - Default value, if provided.
  - Empty string.
- when `override==True`, variable expansion should follow the currently documented priorities:
  - Value of that variable in the .env file.
  - Value of that variable in the environment.
  - Default value, if provided.
  - Empty string.

It would be a breaking change in behavior, but it seems that in the cases where it matters, the current behavior is probably not what is expected. Is that a change you'd be willing to accept?
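A minimal sketch of the suggested semantics using the example from this report, assuming a python-dotenv release in which the suggestion has been implemented; a stream is used so the snippet is self-contained:

```python
import io
import os

import dotenv

os.environ["HOSTNAME"] = "localhost:8081"
env_file = "HOSTNAME=localhost:8080\nURL=http://${HOSTNAME}\n"

# override=False (the default): the environment wins, both for HOSTNAME
# itself and inside the expansion of ${HOSTNAME}.
dotenv.load_dotenv(stream=io.StringIO(env_file))
print(os.environ["HOSTNAME"], os.environ["URL"])
# localhost:8081 http://localhost:8081

# override=True: the .env file wins on both counts.
dotenv.load_dotenv(stream=io.StringIO(env_file), override=True)
print(os.environ["HOSTNAME"], os.environ["URL"])
# localhost:8080 http://localhost:8080
```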
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_main.py::test_set_key_no_file", "tests/test_main.py::test_set_key[-a--expected0-a=\"\"\\n]", "tests/test_main.py::test_set_key[-a-b-expected1-a=\"b\"\\n]", "tests/test_main.py::test_set_key[-a-'b'-expected2-a=\"b\"\\n]", "tests/test_main.py::test_set_key[-a-\"b\"-expected3-a=\"b\"\\n]", "tests/test_main.py::test_set_key[-a-b'c-expected4-a=\"b'c\"\\n]", "tests/test_main.py::test_set_key[-a-b\"c-expected5-a=\"b\\\\\"c\"\\n]", "tests/test_main.py::test_set_key[a=b-a-c-expected6-a=\"c\"\\n]", "tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a=\"c\"\\n]", "tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a=\"c\"\\n\\n]", "tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a=\"e\"\\nc=d]", "tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc=\"g\"\\ne=f]", "tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc=\"d\"\\n]", "tests/test_main.py::test_get_key_no_file", "tests/test_main.py::test_get_key_not_found", "tests/test_main.py::test_get_key_ok", "tests/test_main.py::test_get_key_none", "tests/test_main.py::test_unset_with_value", "tests/test_main.py::test_unset_no_value", "tests/test_main.py::test_unset_non_existent_file", "tests/test_main.py::test_find_dotenv_no_file_raise", "tests/test_main.py::test_find_dotenv_no_file_no_raise", "tests/test_main.py::test_find_dotenv_found", "tests/test_main.py::test_load_dotenv_existing_file", "tests/test_main.py::test_load_dotenv_no_file_verbose", "tests/test_main.py::test_load_dotenv_existing_variable_no_override", "tests/test_main.py::test_load_dotenv_existing_variable_override", "tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_no_override", "tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_with_override", "tests/test_main.py::test_load_dotenv_utf_8", "tests/test_main.py::test_load_dotenv_in_current_dir", "tests/test_main.py::test_dotenv_values_file", "tests/test_main.py::test_dotenv_values_stream[env0-a=$b-False-expected0]", "tests/test_main.py::test_dotenv_values_stream[env1-a=$b-True-expected1]", "tests/test_main.py::test_dotenv_values_stream[env2-a=${b}-False-expected2]", "tests/test_main.py::test_dotenv_values_stream[env3-a=${b}-True-expected3]", "tests/test_main.py::test_dotenv_values_stream[env4-a=${b:-d}-False-expected4]", "tests/test_main.py::test_dotenv_values_stream[env5-a=${b:-d}-True-expected5]", "tests/test_main.py::test_dotenv_values_stream[env6-b=c\\na=${b}-True-expected6]", "tests/test_main.py::test_dotenv_values_stream[env7-a=${b}-True-expected7]", "tests/test_main.py::test_dotenv_values_stream[env8-a=${b:-d}-True-expected8]", "tests/test_main.py::test_dotenv_values_stream[env9-a=\"${b}\"-True-expected9]", "tests/test_main.py::test_dotenv_values_stream[env10-a='${b}'-True-expected10]", "tests/test_main.py::test_dotenv_values_stream[env11-a=x${b}y-True-expected11]", "tests/test_main.py::test_dotenv_values_stream[env12-a=${a}-True-expected12]", "tests/test_main.py::test_dotenv_values_stream[env13-a=${a}-True-expected13]", "tests/test_main.py::test_dotenv_values_stream[env14-a=${a:-c}-True-expected14]", "tests/test_main.py::test_dotenv_values_stream[env15-a=${a:-c}-True-expected15]", "tests/test_main.py::test_dotenv_values_stream[env16-a=${b}${b}-True-expected16]", "tests/test_main.py::test_dotenv_values_stream[env17-b=d\\na=${b}-True-expected17]", "tests/test_main.py::test_dotenv_values_stream[env18-a=b\\na=c\\nd=${a}-True-expected18]", "tests/test_main.py::test_dotenv_values_stream[env19-a=b\\nc=${a}\\nd=e\\nc=${d}-True-expected19]", 
"tests/test_variables.py::test_parse_variables[-expected0]", "tests/test_variables.py::test_parse_variables[a-expected1]", "tests/test_variables.py::test_parse_variables[${a}-expected2]", "tests/test_variables.py::test_parse_variables[${a:-b}-expected3]", "tests/test_variables.py::test_parse_variables[${a}${b}-expected4]", "tests/test_variables.py::test_parse_variables[a${b}c${d}e-expected5]" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-11-15T14:30:17Z"
bsd-3-clause
theskumar__python-dotenv-348
diff --git a/CHANGELOG.md b/CHANGELOG.md index f52cf07..cea2053 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,16 +7,19 @@ project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [Unreleased] -### Added - -- The `dotenv_path` argument of `set_key` and `unset_key` now has a type of `Union[str, - os.PathLike]` instead of just `os.PathLike` (#347 by [@bbc2]). - ### Changed - Require Python 3.5 or a later version. Python 2 and 3.4 are no longer supported. (#341 by [@bbc2]). +### Added + +- The `dotenv_path` argument of `set_key` and `unset_key` now has a type of `Union[str, + os.PathLike]` instead of just `os.PathLike` (#347 by [@bbc2]). +- The `stream` argument of `load_dotenv` and `dotenv_values` can now be a text stream + (`IO[str]`), which includes values like `io.StringIO("foo")` and `open("file.env", + "r")` (#348 by [@bbc2]). + ## [0.18.0] - 2021-06-20 ### Changed diff --git a/src/dotenv/main.py b/src/dotenv/main.py index d550f6f..b8d0a4e 100644 --- a/src/dotenv/main.py +++ b/src/dotenv/main.py @@ -33,13 +33,15 @@ def with_warn_for_invalid_lines(mappings: Iterator[Binding]) -> Iterator[Binding class DotEnv(): def __init__( self, - dotenv_path: Union[str, _PathLike, io.StringIO], + dotenv_path: Optional[Union[str, _PathLike]], + stream: Optional[IO[str]] = None, verbose: bool = False, encoding: Union[None, str] = None, interpolate: bool = True, override: bool = True, ) -> None: - self.dotenv_path = dotenv_path # type: Union[str,_PathLike, io.StringIO] + self.dotenv_path = dotenv_path # type: Optional[Union[str, _PathLike]] + self.stream = stream # type: Optional[IO[str]] self._dict = None # type: Optional[Dict[str, Optional[str]]] self.verbose = verbose # type: bool self.encoding = encoding # type: Union[None, str] @@ -48,14 +50,17 @@ class DotEnv(): @contextmanager def _get_stream(self) -> Iterator[IO[str]]: - if isinstance(self.dotenv_path, io.StringIO): - yield self.dotenv_path - elif os.path.isfile(self.dotenv_path): + if self.dotenv_path and os.path.isfile(self.dotenv_path): with io.open(self.dotenv_path, encoding=self.encoding) as stream: yield stream + elif self.stream is not None: + yield self.stream else: if self.verbose: - logger.info("Python-dotenv could not find configuration file %s.", self.dotenv_path or '.env') + logger.info( + "Python-dotenv could not find configuration file %s.", + self.dotenv_path or '.env', + ) yield io.StringIO('') def dict(self) -> Dict[str, Optional[str]]: @@ -290,7 +295,7 @@ def find_dotenv( def load_dotenv( dotenv_path: Union[str, _PathLike, None] = None, - stream: Optional[io.StringIO] = None, + stream: Optional[IO[str]] = None, verbose: bool = False, override: bool = False, interpolate: bool = True, @@ -299,7 +304,8 @@ def load_dotenv( """Parse a .env file and then load all the variables found as environment variables. - *dotenv_path*: absolute or relative path to .env file. - - *stream*: `StringIO` object with .env content, used if `dotenv_path` is `None`. + - *stream*: Text stream (such as `io.StringIO`) with .env content, used if + `dotenv_path` is `None`. - *verbose*: whether to output a warning the .env file is missing. Defaults to `False`. - *override*: whether to override the system environment variables with the variables @@ -308,9 +314,12 @@ def load_dotenv( If both `dotenv_path` and `stream`, `find_dotenv()` is used to find the .env file. 
""" - f = dotenv_path or stream or find_dotenv() + if dotenv_path is None and stream is None: + dotenv_path = find_dotenv() + dotenv = DotEnv( - f, + dotenv_path=dotenv_path, + stream=stream, verbose=verbose, interpolate=interpolate, override=override, @@ -321,7 +330,7 @@ def load_dotenv( def dotenv_values( dotenv_path: Union[str, _PathLike, None] = None, - stream: Optional[io.StringIO] = None, + stream: Optional[IO[str]] = None, verbose: bool = False, interpolate: bool = True, encoding: Optional[str] = "utf-8", @@ -338,9 +347,12 @@ def dotenv_values( If both `dotenv_path` and `stream`, `find_dotenv()` is used to find the .env file. """ - f = dotenv_path or stream or find_dotenv() + if dotenv_path is None and stream is None: + dotenv_path = find_dotenv() + return DotEnv( - f, + dotenv_path=dotenv_path, + stream=stream, verbose=verbose, interpolate=interpolate, override=True,
theskumar/python-dotenv
955e2a4ea6391a322c779e737f5a7aca7eaa963d
diff --git a/tests/test_main.py b/tests/test_main.py index d612bb2..13e2791 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -277,7 +277,7 @@ def test_load_dotenv_redefine_var_used_in_file_with_override(dotenv_file): @mock.patch.dict(os.environ, {}, clear=True) -def test_load_dotenv_utf_8(): +def test_load_dotenv_string_io_utf_8(): stream = io.StringIO("a=à") result = dotenv.load_dotenv(stream=stream) @@ -286,6 +286,18 @@ def test_load_dotenv_utf_8(): assert os.environ == {"a": "à"} [email protected](os.environ, {}, clear=True) +def test_load_dotenv_file_stream(dotenv_file): + with open(dotenv_file, "w") as f: + f.write("a=b") + + with open(dotenv_file, "r") as f: + result = dotenv.load_dotenv(stream=f) + + assert result is True + assert os.environ == {"a": "b"} + + def test_load_dotenv_in_current_dir(tmp_path): dotenv_path = tmp_path / '.env' dotenv_path.write_bytes(b'a=b') @@ -353,7 +365,7 @@ def test_dotenv_values_file(dotenv_file): ({}, "a=b\nc=${a}\nd=e\nc=${d}", True, {"a": "b", "c": "e", "d": "e"}), ], ) -def test_dotenv_values_stream(env, string, interpolate, expected): +def test_dotenv_values_string_io(env, string, interpolate, expected): with mock.patch.dict(os.environ, env, clear=True): stream = io.StringIO(string) stream.seek(0) @@ -361,3 +373,13 @@ def test_dotenv_values_stream(env, string, interpolate, expected): result = dotenv.dotenv_values(stream=stream, interpolate=interpolate) assert result == expected + + +def test_dotenv_values_file_stream(dotenv_file): + with open(dotenv_file, "w") as f: + f.write("a=b") + + with open(dotenv_file, "r") as f: + result = dotenv.dotenv_values(stream=f) + + assert result == {"a": "b"}
Stream as any text file? As noted before in #208 and #156, the annotation at https://github.com/theskumar/python-dotenv/blob/a7fe93f6cc73ab9de28191e3854f1a713d53363b/src/dotenv/main.py#L301 is unusually restrictive. It would be useful to accept any text file as a stream. My use case would be packaging a `.env.defaults` file opened by [importlib.resources.open_text](https://docs.python.org/3/library/importlib.html#importlib.resources.open_text), so environment variables and their defaults can be managed and documented uniformly via `.env` files.
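A minimal sketch of the widened API implemented by the patch above: `stream` accepts any text stream (`IO[str]`), not just `io.StringIO`. The `.env.defaults` file name is illustrative.

```python
import io

import dotenv

# An in-memory buffer (already supported before the change)...
dotenv.load_dotenv(stream=io.StringIO("a=b"))

# ...and, once the annotation is widened to IO[str], any open text file,
# e.g. one returned by importlib.resources.open_text.
with open(".env.defaults", "r") as f:  # illustrative file name
    values = dotenv.dotenv_values(stream=f)
```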
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_main.py::test_load_dotenv_file_stream", "tests/test_main.py::test_dotenv_values_file_stream" ]
[ "tests/test_main.py::test_set_key_no_file", "tests/test_main.py::test_set_key[-a--expected0-a=''\\n]", "tests/test_main.py::test_set_key[-a-b-expected1-a='b'\\n]", "tests/test_main.py::test_set_key[-a-'b'-expected2-a='\\\\'b\\\\''\\n]", "tests/test_main.py::test_set_key[-a-\"b\"-expected3-a='\"b\"'\\n]", "tests/test_main.py::test_set_key[-a-b'c-expected4-a='b\\\\'c'\\n]", "tests/test_main.py::test_set_key[-a-b\"c-expected5-a='b\"c'\\n]", "tests/test_main.py::test_set_key[a=b-a-c-expected6-a='c'\\n]", "tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a='c'\\n]", "tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a='c'\\n\\n]", "tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a='e'\\nc=d]", "tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc='g'\\ne=f]", "tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc='d'\\n]", "tests/test_main.py::test_get_key_no_file", "tests/test_main.py::test_get_key_not_found", "tests/test_main.py::test_get_key_ok", "tests/test_main.py::test_get_key_none", "tests/test_main.py::test_unset_with_value", "tests/test_main.py::test_unset_no_value", "tests/test_main.py::test_unset_non_existent_file", "tests/test_main.py::test_find_dotenv_no_file_raise", "tests/test_main.py::test_find_dotenv_no_file_no_raise", "tests/test_main.py::test_find_dotenv_found", "tests/test_main.py::test_load_dotenv_existing_file", "tests/test_main.py::test_load_dotenv_no_file_verbose", "tests/test_main.py::test_load_dotenv_existing_variable_no_override", "tests/test_main.py::test_load_dotenv_existing_variable_override", "tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_no_override", "tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_with_override", "tests/test_main.py::test_load_dotenv_string_io_utf_8", "tests/test_main.py::test_load_dotenv_in_current_dir", "tests/test_main.py::test_dotenv_values_file", "tests/test_main.py::test_dotenv_values_string_io[env0-a=$b-False-expected0]", "tests/test_main.py::test_dotenv_values_string_io[env1-a=$b-True-expected1]", "tests/test_main.py::test_dotenv_values_string_io[env2-a=${b}-False-expected2]", "tests/test_main.py::test_dotenv_values_string_io[env3-a=${b}-True-expected3]", "tests/test_main.py::test_dotenv_values_string_io[env4-a=${b:-d}-False-expected4]", "tests/test_main.py::test_dotenv_values_string_io[env5-a=${b:-d}-True-expected5]", "tests/test_main.py::test_dotenv_values_string_io[env6-b=c\\na=${b}-True-expected6]", "tests/test_main.py::test_dotenv_values_string_io[env7-a=${b}-True-expected7]", "tests/test_main.py::test_dotenv_values_string_io[env8-a=${b:-d}-True-expected8]", "tests/test_main.py::test_dotenv_values_string_io[env9-a=\"${b}\"-True-expected9]", "tests/test_main.py::test_dotenv_values_string_io[env10-a='${b}'-True-expected10]", "tests/test_main.py::test_dotenv_values_string_io[env11-a=x${b}y-True-expected11]", "tests/test_main.py::test_dotenv_values_string_io[env12-a=${a}-True-expected12]", "tests/test_main.py::test_dotenv_values_string_io[env13-a=${a}-True-expected13]", "tests/test_main.py::test_dotenv_values_string_io[env14-a=${a:-c}-True-expected14]", "tests/test_main.py::test_dotenv_values_string_io[env15-a=${a:-c}-True-expected15]", "tests/test_main.py::test_dotenv_values_string_io[env16-a=${b}${b}-True-expected16]", "tests/test_main.py::test_dotenv_values_string_io[env17-b=d\\na=${b}-True-expected17]", "tests/test_main.py::test_dotenv_values_string_io[env18-a=b\\na=c\\nd=${a}-True-expected18]", 
"tests/test_main.py::test_dotenv_values_string_io[env19-a=b\\nc=${a}\\nd=e\\nc=${d}-True-expected19]" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-07-14T09:01:18Z"
bsd-3-clause
theskumar__python-dotenv-361
diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b2b2bb..811ed1a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,13 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## Unreleased + +### Fixed + +- In `set_key`, add missing newline character before new entry if necessary. (#361 by + [@bbc2]) + ## [0.19.1] - 2021-08-09 ### Added diff --git a/src/dotenv/main.py b/src/dotenv/main.py index b8d0a4e..d867f02 100644 --- a/src/dotenv/main.py +++ b/src/dotenv/main.py @@ -167,13 +167,17 @@ def set_key( with rewrite(dotenv_path) as (source, dest): replaced = False + missing_newline = False for mapping in with_warn_for_invalid_lines(parse_stream(source)): if mapping.key == key_to_set: dest.write(line_out) replaced = True else: dest.write(mapping.original.string) + missing_newline = not mapping.original.string.endswith("\n") if not replaced: + if missing_newline: + dest.write("\n") dest.write(line_out) return True, key_to_set, value_to_set
theskumar/python-dotenv
fc138ce8a430b758f4f2c89bc8104f259e2cba38
diff --git a/tests/test_main.py b/tests/test_main.py index 13e2791..541ac5e 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -37,6 +37,7 @@ def test_set_key_no_file(tmp_path): ("a=b\nc=d", "a", "e", (True, "a", "e"), "a='e'\nc=d"), ("a=b\nc=d\ne=f", "c", "g", (True, "c", "g"), "a=b\nc='g'\ne=f"), ("a=b\n", "c", "d", (True, "c", "d"), "a=b\nc='d'\n"), + ("a=b", "c", "d", (True, "c", "d"), "a=b\nc='d'\n"), ], ) def test_set_key(dotenv_file, before, key, value, expected, after):
set_key does not check if there is a newline before inserting a new key

Example script:

```python
import os

import dotenv

DOTENV_FILE = os.path.join(os.path.dirname(__file__), ".env")


def init_dotenv():
    with open(DOTENV_FILE, "w") as fp:
        fp.write("TEST=ABCD")


def print_dotenv():
    with open(DOTENV_FILE) as fp:
        content = fp.read()
    print("-" * 30, "DOTENV", "-" * 30)
    print(content)
    print()


init_dotenv()
print_dotenv()
dotenv.set_key(DOTENV_FILE, "FOO", "1234")
print_dotenv()
```

Output:

```
------------------------------ DOTENV ------------------------------
TEST=ABCD

------------------------------ DOTENV ------------------------------
TEST=ABCDFOO='1234'
```

Is this something you want to fix?
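A minimal sketch of the behavior after the fix above (the `.env` path is illustrative): when the last line lacks a trailing newline, `set_key` now writes one before appending the new entry.

```python
import dotenv

path = ".env"  # illustrative path
with open(path, "w") as f:
    f.write("TEST=ABCD")  # note: no trailing newline

dotenv.set_key(path, "FOO", "1234")

print(open(path).read())
# Before the fix: TEST=ABCDFOO='1234'
# After the fix:
# TEST=ABCD
# FOO='1234'
```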
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_main.py::test_set_key[a=b-c-d-expected12-a=b\\nc='d'\\n]" ]
[ "tests/test_main.py::test_set_key_no_file", "tests/test_main.py::test_set_key[-a--expected0-a=''\\n]", "tests/test_main.py::test_set_key[-a-b-expected1-a='b'\\n]", "tests/test_main.py::test_set_key[-a-'b'-expected2-a='\\\\'b\\\\''\\n]", "tests/test_main.py::test_set_key[-a-\"b\"-expected3-a='\"b\"'\\n]", "tests/test_main.py::test_set_key[-a-b'c-expected4-a='b\\\\'c'\\n]", "tests/test_main.py::test_set_key[-a-b\"c-expected5-a='b\"c'\\n]", "tests/test_main.py::test_set_key[a=b-a-c-expected6-a='c'\\n]", "tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a='c'\\n]", "tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a='c'\\n\\n]", "tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a='e'\\nc=d]", "tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc='g'\\ne=f]", "tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc='d'\\n]", "tests/test_main.py::test_get_key_no_file", "tests/test_main.py::test_get_key_not_found", "tests/test_main.py::test_get_key_ok", "tests/test_main.py::test_get_key_none", "tests/test_main.py::test_unset_with_value", "tests/test_main.py::test_unset_no_value", "tests/test_main.py::test_unset_non_existent_file", "tests/test_main.py::test_find_dotenv_no_file_raise", "tests/test_main.py::test_find_dotenv_no_file_no_raise", "tests/test_main.py::test_find_dotenv_found", "tests/test_main.py::test_load_dotenv_existing_file", "tests/test_main.py::test_load_dotenv_no_file_verbose", "tests/test_main.py::test_load_dotenv_existing_variable_no_override", "tests/test_main.py::test_load_dotenv_existing_variable_override", "tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_no_override", "tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_with_override", "tests/test_main.py::test_load_dotenv_string_io_utf_8", "tests/test_main.py::test_load_dotenv_file_stream", "tests/test_main.py::test_load_dotenv_in_current_dir", "tests/test_main.py::test_dotenv_values_file", "tests/test_main.py::test_dotenv_values_string_io[env0-a=$b-False-expected0]", "tests/test_main.py::test_dotenv_values_string_io[env1-a=$b-True-expected1]", "tests/test_main.py::test_dotenv_values_string_io[env2-a=${b}-False-expected2]", "tests/test_main.py::test_dotenv_values_string_io[env3-a=${b}-True-expected3]", "tests/test_main.py::test_dotenv_values_string_io[env4-a=${b:-d}-False-expected4]", "tests/test_main.py::test_dotenv_values_string_io[env5-a=${b:-d}-True-expected5]", "tests/test_main.py::test_dotenv_values_string_io[env6-b=c\\na=${b}-True-expected6]", "tests/test_main.py::test_dotenv_values_string_io[env7-a=${b}-True-expected7]", "tests/test_main.py::test_dotenv_values_string_io[env8-a=${b:-d}-True-expected8]", "tests/test_main.py::test_dotenv_values_string_io[env9-a=\"${b}\"-True-expected9]", "tests/test_main.py::test_dotenv_values_string_io[env10-a='${b}'-True-expected10]", "tests/test_main.py::test_dotenv_values_string_io[env11-a=x${b}y-True-expected11]", "tests/test_main.py::test_dotenv_values_string_io[env12-a=${a}-True-expected12]", "tests/test_main.py::test_dotenv_values_string_io[env13-a=${a}-True-expected13]", "tests/test_main.py::test_dotenv_values_string_io[env14-a=${a:-c}-True-expected14]", "tests/test_main.py::test_dotenv_values_string_io[env15-a=${a:-c}-True-expected15]", "tests/test_main.py::test_dotenv_values_string_io[env16-a=${b}${b}-True-expected16]", "tests/test_main.py::test_dotenv_values_string_io[env17-b=d\\na=${b}-True-expected17]", "tests/test_main.py::test_dotenv_values_string_io[env18-a=b\\na=c\\nd=${a}-True-expected18]", 
"tests/test_main.py::test_dotenv_values_string_io[env19-a=b\\nc=${a}\\nd=e\\nc=${d}-True-expected19]", "tests/test_main.py::test_dotenv_values_file_stream" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2021-10-23T09:42:48Z"
bsd-3-clause
theskumar__python-dotenv-379
diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b18856..3d4d014 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,13 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## Unreleased + +### Added + +- Add `encoding` (`Optional[str]`) parameter to `get_key`, `set_key` and `unset_key`. + (#379 by [@bbc2]) + ## [0.19.2] - 2021-11-11 ### Fixed diff --git a/src/dotenv/main.py b/src/dotenv/main.py index d867f02..20ac61b 100644 --- a/src/dotenv/main.py +++ b/src/dotenv/main.py @@ -109,23 +109,30 @@ class DotEnv(): return None -def get_key(dotenv_path: Union[str, _PathLike], key_to_get: str) -> Optional[str]: +def get_key( + dotenv_path: Union[str, _PathLike], + key_to_get: str, + encoding: Optional[str] = "utf-8", +) -> Optional[str]: """ - Gets the value of a given key from the given .env + Get the value of a given key from the given .env. - If the .env path given doesn't exist, fails + Returns `None` if the key isn't found or doesn't have a value. """ - return DotEnv(dotenv_path, verbose=True).get(key_to_get) + return DotEnv(dotenv_path, verbose=True, encoding=encoding).get(key_to_get) @contextmanager -def rewrite(path: Union[str, _PathLike]) -> Iterator[Tuple[IO[str], IO[str]]]: +def rewrite( + path: Union[str, _PathLike], + encoding: Optional[str], +) -> Iterator[Tuple[IO[str], IO[str]]]: try: if not os.path.isfile(path): - with io.open(path, "w+") as source: + with io.open(path, "w+", encoding=encoding) as source: source.write("") - with tempfile.NamedTemporaryFile(mode="w+", delete=False) as dest: - with io.open(path) as source: + with tempfile.NamedTemporaryFile(mode="w+", delete=False, encoding=encoding) as dest: + with io.open(path, encoding=encoding) as source: yield (source, dest) # type: ignore except BaseException: if os.path.isfile(dest.name): @@ -141,6 +148,7 @@ def set_key( value_to_set: str, quote_mode: str = "always", export: bool = False, + encoding: Optional[str] = "utf-8", ) -> Tuple[Optional[bool], str, str]: """ Adds or Updates a key/value to the given .env @@ -165,7 +173,7 @@ def set_key( else: line_out = "{}={}\n".format(key_to_set, value_out) - with rewrite(dotenv_path) as (source, dest): + with rewrite(dotenv_path, encoding=encoding) as (source, dest): replaced = False missing_newline = False for mapping in with_warn_for_invalid_lines(parse_stream(source)): @@ -187,6 +195,7 @@ def unset_key( dotenv_path: Union[str, _PathLike], key_to_unset: str, quote_mode: str = "always", + encoding: Optional[str] = "utf-8", ) -> Tuple[Optional[bool], str]: """ Removes a given key from the given .env @@ -199,7 +208,7 @@ def unset_key( return None, key_to_unset removed = False - with rewrite(dotenv_path) as (source, dest): + with rewrite(dotenv_path, encoding=encoding) as (source, dest): for mapping in with_warn_for_invalid_lines(parse_stream(source)): if mapping.key == key_to_unset: removed = True
theskumar/python-dotenv
ba9408c5048e8e512318df423541d2b44ac6019f
diff --git a/tests/test_main.py b/tests/test_main.py index 541ac5e..364fc24 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -53,6 +53,15 @@ def test_set_key(dotenv_file, before, key, value, expected, after): mock_warning.assert_not_called() +def test_set_key_encoding(dotenv_file): + encoding = "latin-1" + + result = dotenv.set_key(dotenv_file, "a", "é", encoding=encoding) + + assert result == (True, "a", "é") + assert open(dotenv_file, "r", encoding=encoding).read() == "a='é'\n" + + def test_set_key_permission_error(dotenv_file): os.chmod(dotenv_file, 0o000) @@ -107,6 +116,16 @@ def test_get_key_ok(dotenv_file): mock_warning.assert_not_called() +def test_get_key_encoding(dotenv_file): + encoding = "latin-1" + with open(dotenv_file, "w", encoding=encoding) as f: + f.write("é=è") + + result = dotenv.get_key(dotenv_file, "é", encoding=encoding) + + assert result == "è" + + def test_get_key_none(dotenv_file): logger = logging.getLogger("dotenv.main") with open(dotenv_file, "w") as f: @@ -147,6 +166,18 @@ def test_unset_no_value(dotenv_file): mock_warning.assert_not_called() +def test_unset_encoding(dotenv_file): + encoding = "latin-1" + with open(dotenv_file, "w", encoding=encoding) as f: + f.write("é=x") + + result = dotenv.unset_key(dotenv_file, "é", encoding=encoding) + + assert result == (True, "é") + with open(dotenv_file, "r", encoding=encoding) as f: + assert f.read() == "" + + def test_unset_non_existent_file(tmp_path): nx_file = str(tmp_path / "nx") logger = logging.getLogger("dotenv.main")
encoding not an option using set_key You can create and read a .env file using the main.DotEnv class with an encoding, but the key-setting and key-unsetting functions neither honor a chosen encoding nor offer an option to set one: they both go through the `rewrite` context manager, which opens files without an encoding argument.
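A minimal sketch of the API added by the patch above: `get_key`, `set_key`, and `unset_key` take an `encoding` argument (default `"utf-8"`), which `rewrite` then uses when opening both the source and temporary files. The path is illustrative.

```python
import dotenv

path = ".env"  # illustrative path
encoding = "latin-1"

dotenv.set_key(path, "a", "é", encoding=encoding)    # file written as latin-1
print(dotenv.get_key(path, "a", encoding=encoding))  # é
dotenv.unset_key(path, "a", encoding=encoding)
```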
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_main.py::test_set_key_encoding", "tests/test_main.py::test_get_key_encoding", "tests/test_main.py::test_unset_encoding" ]
[ "tests/test_main.py::test_set_key_no_file", "tests/test_main.py::test_set_key[-a--expected0-a=''\\n]", "tests/test_main.py::test_set_key[-a-b-expected1-a='b'\\n]", "tests/test_main.py::test_set_key[-a-'b'-expected2-a='\\\\'b\\\\''\\n]", "tests/test_main.py::test_set_key[-a-\"b\"-expected3-a='\"b\"'\\n]", "tests/test_main.py::test_set_key[-a-b'c-expected4-a='b\\\\'c'\\n]", "tests/test_main.py::test_set_key[-a-b\"c-expected5-a='b\"c'\\n]", "tests/test_main.py::test_set_key[a=b-a-c-expected6-a='c'\\n]", "tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a='c'\\n]", "tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a='c'\\n\\n]", "tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a='e'\\nc=d]", "tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc='g'\\ne=f]", "tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc='d'\\n]", "tests/test_main.py::test_set_key[a=b-c-d-expected12-a=b\\nc='d'\\n]", "tests/test_main.py::test_get_key_no_file", "tests/test_main.py::test_get_key_not_found", "tests/test_main.py::test_get_key_ok", "tests/test_main.py::test_get_key_none", "tests/test_main.py::test_unset_with_value", "tests/test_main.py::test_unset_no_value", "tests/test_main.py::test_unset_non_existent_file", "tests/test_main.py::test_find_dotenv_no_file_raise", "tests/test_main.py::test_find_dotenv_no_file_no_raise", "tests/test_main.py::test_find_dotenv_found", "tests/test_main.py::test_load_dotenv_existing_file", "tests/test_main.py::test_load_dotenv_no_file_verbose", "tests/test_main.py::test_load_dotenv_existing_variable_no_override", "tests/test_main.py::test_load_dotenv_existing_variable_override", "tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_no_override", "tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_with_override", "tests/test_main.py::test_load_dotenv_string_io_utf_8", "tests/test_main.py::test_load_dotenv_file_stream", "tests/test_main.py::test_load_dotenv_in_current_dir", "tests/test_main.py::test_dotenv_values_file", "tests/test_main.py::test_dotenv_values_string_io[env0-a=$b-False-expected0]", "tests/test_main.py::test_dotenv_values_string_io[env1-a=$b-True-expected1]", "tests/test_main.py::test_dotenv_values_string_io[env2-a=${b}-False-expected2]", "tests/test_main.py::test_dotenv_values_string_io[env3-a=${b}-True-expected3]", "tests/test_main.py::test_dotenv_values_string_io[env4-a=${b:-d}-False-expected4]", "tests/test_main.py::test_dotenv_values_string_io[env5-a=${b:-d}-True-expected5]", "tests/test_main.py::test_dotenv_values_string_io[env6-b=c\\na=${b}-True-expected6]", "tests/test_main.py::test_dotenv_values_string_io[env7-a=${b}-True-expected7]", "tests/test_main.py::test_dotenv_values_string_io[env8-a=${b:-d}-True-expected8]", "tests/test_main.py::test_dotenv_values_string_io[env9-a=\"${b}\"-True-expected9]", "tests/test_main.py::test_dotenv_values_string_io[env10-a='${b}'-True-expected10]", "tests/test_main.py::test_dotenv_values_string_io[env11-a=x${b}y-True-expected11]", "tests/test_main.py::test_dotenv_values_string_io[env12-a=${a}-True-expected12]", "tests/test_main.py::test_dotenv_values_string_io[env13-a=${a}-True-expected13]", "tests/test_main.py::test_dotenv_values_string_io[env14-a=${a:-c}-True-expected14]", "tests/test_main.py::test_dotenv_values_string_io[env15-a=${a:-c}-True-expected15]", "tests/test_main.py::test_dotenv_values_string_io[env16-a=${b}${b}-True-expected16]", "tests/test_main.py::test_dotenv_values_string_io[env17-b=d\\na=${b}-True-expected17]", 
"tests/test_main.py::test_dotenv_values_string_io[env18-a=b\\na=c\\nd=${a}-True-expected18]", "tests/test_main.py::test_dotenv_values_string_io[env19-a=b\\nc=${a}\\nd=e\\nc=${d}-True-expected19]", "tests/test_main.py::test_dotenv_values_file_stream" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2022-02-20T10:09:15Z"
bsd-3-clause
theskumar__python-dotenv-388
diff --git a/src/dotenv/main.py b/src/dotenv/main.py index e7ad430..7841066 100644 --- a/src/dotenv/main.py +++ b/src/dotenv/main.py @@ -87,6 +87,9 @@ class DotEnv(): """ Load the current dotenv as system environment variable. """ + if not self.dict(): + return False + for k, v in self.dict().items(): if k in os.environ and not self.override: continue @@ -324,6 +327,8 @@ def load_dotenv( override: Whether to override the system environment variables with the variables from the `.env` file. encoding: Encoding to be used to read the file. + Returns: + Bool: True if at least one environment variable is set, else False If both `dotenv_path` and `stream` are `None`, `find_dotenv()` is used to find the .env file.
theskumar/python-dotenv
29bceb836965de5bc498af401fd9d2e95194a5c1
diff --git a/tests/test_main.py b/tests/test_main.py index ca14b1a..82c73ba 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -259,8 +259,9 @@ def test_load_dotenv_no_file_verbose(): logger = logging.getLogger("dotenv.main") with mock.patch.object(logger, "info") as mock_info: - dotenv.load_dotenv('.does_not_exist', verbose=True) + result = dotenv.load_dotenv('.does_not_exist', verbose=True) + assert result is False mock_info.assert_called_once_with("Python-dotenv could not find configuration file %s.", ".does_not_exist")
load_dotenv() returns True even if the .env file is not found. This behaviour is not ideal: if the configuration file is not found in the filesystem, it would be better to return False. That way the user can write something like:

```python
if load_dotenv():
    ...  # do stuff
else:
    print("No .env file found")
```

# Steps to reproduce

```
In [1]: import dotenv

In [2]: dotenv.load_dotenv()
Out[2]: True

In [3]: dotenv.find_dotenv()
Out[3]: ''
```
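With the patch above, `load_dotenv` returns `False` when it sets no variables, so the guard sketched in the issue works as intended:

```python
import dotenv

if dotenv.load_dotenv(".does_not_exist"):
    ...  # at least one variable was loaded
else:
    print("No .env file found")  # reachable once the fix is in place
```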
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_main.py::test_load_dotenv_no_file_verbose" ]
[ "tests/test_main.py::test_set_key_no_file", "tests/test_main.py::test_set_key[-a--expected0-a=''\\n]", "tests/test_main.py::test_set_key[-a-b-expected1-a='b'\\n]", "tests/test_main.py::test_set_key[-a-'b'-expected2-a='\\\\'b\\\\''\\n]", "tests/test_main.py::test_set_key[-a-\"b\"-expected3-a='\"b\"'\\n]", "tests/test_main.py::test_set_key[-a-b'c-expected4-a='b\\\\'c'\\n]", "tests/test_main.py::test_set_key[-a-b\"c-expected5-a='b\"c'\\n]", "tests/test_main.py::test_set_key[a=b-a-c-expected6-a='c'\\n]", "tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a='c'\\n]", "tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a='c'\\n\\n]", "tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a='e'\\nc=d]", "tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc='g'\\ne=f]", "tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc='d'\\n]", "tests/test_main.py::test_set_key[a=b-c-d-expected12-a=b\\nc='d'\\n]", "tests/test_main.py::test_set_key_encoding", "tests/test_main.py::test_get_key_no_file", "tests/test_main.py::test_get_key_not_found", "tests/test_main.py::test_get_key_ok", "tests/test_main.py::test_get_key_encoding", "tests/test_main.py::test_get_key_none", "tests/test_main.py::test_unset_with_value", "tests/test_main.py::test_unset_no_value", "tests/test_main.py::test_unset_encoding", "tests/test_main.py::test_unset_non_existent_file", "tests/test_main.py::test_find_dotenv_no_file_raise", "tests/test_main.py::test_find_dotenv_no_file_no_raise", "tests/test_main.py::test_find_dotenv_found", "tests/test_main.py::test_load_dotenv_existing_file", "tests/test_main.py::test_load_dotenv_existing_variable_no_override", "tests/test_main.py::test_load_dotenv_existing_variable_override", "tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_no_override", "tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_with_override", "tests/test_main.py::test_load_dotenv_string_io_utf_8", "tests/test_main.py::test_load_dotenv_file_stream", "tests/test_main.py::test_load_dotenv_in_current_dir", "tests/test_main.py::test_dotenv_values_file", "tests/test_main.py::test_dotenv_values_string_io[env0-a=$b-False-expected0]", "tests/test_main.py::test_dotenv_values_string_io[env1-a=$b-True-expected1]", "tests/test_main.py::test_dotenv_values_string_io[env2-a=${b}-False-expected2]", "tests/test_main.py::test_dotenv_values_string_io[env3-a=${b}-True-expected3]", "tests/test_main.py::test_dotenv_values_string_io[env4-a=${b:-d}-False-expected4]", "tests/test_main.py::test_dotenv_values_string_io[env5-a=${b:-d}-True-expected5]", "tests/test_main.py::test_dotenv_values_string_io[env6-b=c\\na=${b}-True-expected6]", "tests/test_main.py::test_dotenv_values_string_io[env7-a=${b}-True-expected7]", "tests/test_main.py::test_dotenv_values_string_io[env8-a=${b:-d}-True-expected8]", "tests/test_main.py::test_dotenv_values_string_io[env9-a=\"${b}\"-True-expected9]", "tests/test_main.py::test_dotenv_values_string_io[env10-a='${b}'-True-expected10]", "tests/test_main.py::test_dotenv_values_string_io[env11-a=x${b}y-True-expected11]", "tests/test_main.py::test_dotenv_values_string_io[env12-a=${a}-True-expected12]", "tests/test_main.py::test_dotenv_values_string_io[env13-a=${a}-True-expected13]", "tests/test_main.py::test_dotenv_values_string_io[env14-a=${a:-c}-True-expected14]", "tests/test_main.py::test_dotenv_values_string_io[env15-a=${a:-c}-True-expected15]", "tests/test_main.py::test_dotenv_values_string_io[env16-a=${b}${b}-True-expected16]", 
"tests/test_main.py::test_dotenv_values_string_io[env17-b=d\\na=${b}-True-expected17]", "tests/test_main.py::test_dotenv_values_string_io[env18-a=b\\na=c\\nd=${a}-True-expected18]", "tests/test_main.py::test_dotenv_values_string_io[env19-a=b\\nc=${a}\\nd=e\\nc=${d}-True-expected19]", "tests/test_main.py::test_dotenv_values_file_stream" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2022-03-15T01:49:41Z"
bsd-3-clause
theskumar__python-dotenv-407
diff --git a/README.md b/README.md index eb6bb53..a9d19bf 100644 --- a/README.md +++ b/README.md @@ -146,6 +146,11 @@ $ dotenv set EMAIL [email protected] $ dotenv list USER=foo [email protected] +$ dotenv list --format=json +{ + "USER": "foo", + "EMAIL": "[email protected]" +} $ dotenv run -- python foo.py ``` diff --git a/src/dotenv/cli.py b/src/dotenv/cli.py index 3411e34..b845b95 100644 --- a/src/dotenv/cli.py +++ b/src/dotenv/cli.py @@ -1,4 +1,6 @@ +import json import os +import shlex import sys from subprocess import Popen from typing import Any, Dict, List @@ -36,7 +38,11 @@ def cli(ctx: click.Context, file: Any, quote: Any, export: Any) -> None: @cli.command() @click.pass_context -def list(ctx: click.Context) -> None: [email protected]('--format', default='simple', + type=click.Choice(['simple', 'json', 'shell', 'export']), + help="The format in which to display the list. Default format is simple, " + "which displays name=value without quotes.") +def list(ctx: click.Context, format: bool) -> None: '''Display all the stored key/value.''' file = ctx.obj['FILE'] if not os.path.isfile(file): @@ -45,8 +51,16 @@ def list(ctx: click.Context) -> None: ctx=ctx ) dotenv_as_dict = dotenv_values(file) - for k, v in dotenv_as_dict.items(): - click.echo('%s=%s' % (k, v)) + if format == 'json': + click.echo(json.dumps(dotenv_as_dict, indent=2, sort_keys=True)) + else: + prefix = 'export ' if format == 'export' else '' + for k in sorted(dotenv_as_dict): + v = dotenv_as_dict[k] + if v is not None: + if format in ('export', 'shell'): + v = shlex.quote(v) + click.echo('%s%s=%s' % (prefix, k, v)) @cli.command()
theskumar/python-dotenv
2f36c082c278bad1a84411f1ad61547f95cecdb8
diff --git a/tests/test_cli.py b/tests/test_cli.py index 223476f..ca5ba2a 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -2,19 +2,38 @@ import os import pytest import sh - +from typing import Optional import dotenv from dotenv.cli import cli as dotenv_cli from dotenv.version import __version__ -def test_list(cli, dotenv_file): [email protected]( + "format,content,expected", + ( + (None, "x='a b c'", '''x=a b c\n'''), + ("simple", "x='a b c'", '''x=a b c\n'''), + ("simple", """x='"a b c"'""", '''x="a b c"\n'''), + ("simple", '''x="'a b c'"''', '''x='a b c'\n'''), + ("json", "x='a b c'", '''{\n "x": "a b c"\n}\n'''), + ("shell", "x='a b c'", "x='a b c'\n"), + ("shell", """x='"a b c"'""", '''x='"a b c"'\n'''), + ("shell", '''x="'a b c'"''', '''x=''"'"'a b c'"'"''\n'''), + ("shell", "x='a\nb\nc'", "x='a\nb\nc'\n"), + ("export", "x='a b c'", '''export x='a b c'\n'''), + ) +) +def test_list(cli, dotenv_file, format: Optional[str], content: str, expected: str): with open(dotenv_file, "w") as f: - f.write("a=b") + f.write(content + '\n') + + args = ['--file', dotenv_file, 'list'] + if format is not None: + args.extend(['--format', format]) - result = cli.invoke(dotenv_cli, ['--file', dotenv_file, 'list']) + result = cli.invoke(dotenv_cli, args) - assert (result.exit_code, result.output) == (0, result.output) + assert (result.exit_code, result.output) == (0, expected) def test_list_non_existent_file(cli):
--format= option for CLI list command It would be nice to have a way to dump all variables as JSON, which many other tools can consume. I'd propose adding a `--json` or `-j` option to the "list" command. I'd be happy to submit a pull request if this sounds interesting.
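A minimal sketch of exercising the option the patch above adds, driven from Python via click's test runner (the same mechanism the test patch uses); the `.env` path is illustrative.

```python
from click.testing import CliRunner

from dotenv.cli import cli

runner = CliRunner()
result = runner.invoke(cli, ["--file", ".env", "list", "--format", "json"])
print(result.output)  # pretty-printed, key-sorted JSON of the .env contents
```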
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_cli.py::test_list[simple-x='a", "tests/test_cli.py::test_list[simple-x='\"a", "tests/test_cli.py::test_list[simple-x=\"'a", "tests/test_cli.py::test_list[json-x='a", "tests/test_cli.py::test_list[shell-x='a", "tests/test_cli.py::test_list[shell-x='\"a", "tests/test_cli.py::test_list[shell-x=\"'a", "tests/test_cli.py::test_list[shell-x='a\\nb\\nc'-x='a\\nb\\nc'\\n]", "tests/test_cli.py::test_list[export-x='a" ]
[ "tests/test_cli.py::test_list[None-x='a", "tests/test_cli.py::test_list_non_existent_file", "tests/test_cli.py::test_list_no_file", "tests/test_cli.py::test_get_existing_value", "tests/test_cli.py::test_get_non_existent_value", "tests/test_cli.py::test_get_no_file", "tests/test_cli.py::test_unset_existing_value", "tests/test_cli.py::test_unset_non_existent_value", "tests/test_cli.py::test_set_quote_options[always-a-x-a='x'\\n]", "tests/test_cli.py::test_set_quote_options[never-a-x-a=x\\n]", "tests/test_cli.py::test_set_quote_options[auto-a-x-a=x\\n]", "tests/test_cli.py::test_set_quote_options[auto-a-x", "tests/test_cli.py::test_set_quote_options[auto-a-$-a='$'\\n]", "tests/test_cli.py::test_set_export[.nx_file-true-a-x-export", "tests/test_cli.py::test_set_export[.nx_file-false-a-x-a='x'\\n]", "tests/test_cli.py::test_set_non_existent_file", "tests/test_cli.py::test_set_no_file", "tests/test_cli.py::test_run_with_other_env", "tests/test_cli.py::test_run_without_cmd", "tests/test_cli.py::test_run_with_invalid_cmd", "tests/test_cli.py::test_run_with_version" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2022-05-19T20:16:19Z"
bsd-3-clause
theskumar__python-dotenv-414
diff --git a/README.md b/README.md index a9d19bf..983b7d1 100644 --- a/README.md +++ b/README.md @@ -163,7 +163,7 @@ The format is not formally specified and still improves over time. That being s Keys can be unquoted or single-quoted. Values can be unquoted, single- or double-quoted. Spaces before and after keys, equal signs, and values are ignored. Values can be followed -by a comment. Lines can start with the `export` directive, which has no effect on their +by a comment. Lines can start with the `export` directive, which does not affect their interpretation. Allowed escape sequences: diff --git a/src/dotenv/main.py b/src/dotenv/main.py index 05d377a..3321788 100644 --- a/src/dotenv/main.py +++ b/src/dotenv/main.py @@ -125,15 +125,16 @@ def rewrite( path: Union[str, os.PathLike], encoding: Optional[str], ) -> Iterator[Tuple[IO[str], IO[str]]]: + dest = None try: if not os.path.isfile(path): with open(path, "w+", encoding=encoding) as source: source.write("") - with tempfile.NamedTemporaryFile(mode="w+", delete=False, encoding=encoding) as dest: - with open(path, encoding=encoding) as source: - yield (source, dest) # type: ignore + dest = tempfile.NamedTemporaryFile(mode="w+", delete=False, encoding=encoding) + with open(path, encoding=encoding) as source: + yield (source, dest) # type: ignore except BaseException: - if os.path.isfile(dest.name): + if dest and os.path.isfile(dest.name): os.unlink(dest.name) raise else:
theskumar/python-dotenv
914c68ef0e4c2c085d2753f5cbbf304852f37850
diff --git a/tests/test_main.py b/tests/test_main.py index 82c73ba..84a982f 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -22,6 +22,11 @@ def test_set_key_no_file(tmp_path): assert os.path.exists(nx_file) +def test_set_key_invalid_file(): + with pytest.raises(TypeError): + result = dotenv.set_key(None, "foo", "bar") + + @pytest.mark.parametrize( "before,key,value,expected,after", [
Error Handling in rewrite is incorrect. Look at: https://github.com/theskumar/python-dotenv/blob/master/src/dotenv/main.py#L136 If lines 136, 137, or 140 are ever hit, a "local variable 'dest' referenced before assignment" error is raised, because `dest` is only assigned inside the `with` statement above, so it is unbound whenever an exception occurs before that assignment.
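A trimmed sketch of the corrected pattern from the patch above: bind `dest` to `None` before the `try`, so the cleanup branch can reference it even when the failure happens before the temporary file is created. The success path (moving the temp file over `path`) is omitted here.

```python
import os
import tempfile
from contextlib import contextmanager


@contextmanager
def rewrite(path):
    dest = None  # bound before the try, so the except branch can always see it
    try:
        dest = tempfile.NamedTemporaryFile(mode="w+", delete=False)
        with open(path) as source:  # may raise before or after dest exists
            yield (source, dest)
    except BaseException:
        if dest and os.path.isfile(dest.name):
            os.unlink(dest.name)  # remove the partial temporary file
        raise
```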
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_main.py::test_set_key_invalid_file" ]
[ "tests/test_main.py::test_set_key_no_file", "tests/test_main.py::test_set_key[-a--expected0-a=''\\n]", "tests/test_main.py::test_set_key[-a-b-expected1-a='b'\\n]", "tests/test_main.py::test_set_key[-a-'b'-expected2-a='\\\\'b\\\\''\\n]", "tests/test_main.py::test_set_key[-a-\"b\"-expected3-a='\"b\"'\\n]", "tests/test_main.py::test_set_key[-a-b'c-expected4-a='b\\\\'c'\\n]", "tests/test_main.py::test_set_key[-a-b\"c-expected5-a='b\"c'\\n]", "tests/test_main.py::test_set_key[a=b-a-c-expected6-a='c'\\n]", "tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a='c'\\n]", "tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a='c'\\n\\n]", "tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a='e'\\nc=d]", "tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc='g'\\ne=f]", "tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc='d'\\n]", "tests/test_main.py::test_set_key[a=b-c-d-expected12-a=b\\nc='d'\\n]", "tests/test_main.py::test_set_key_encoding", "tests/test_main.py::test_get_key_no_file", "tests/test_main.py::test_get_key_not_found", "tests/test_main.py::test_get_key_ok", "tests/test_main.py::test_get_key_encoding", "tests/test_main.py::test_get_key_none", "tests/test_main.py::test_unset_with_value", "tests/test_main.py::test_unset_no_value", "tests/test_main.py::test_unset_encoding", "tests/test_main.py::test_unset_non_existent_file", "tests/test_main.py::test_find_dotenv_no_file_raise", "tests/test_main.py::test_find_dotenv_no_file_no_raise", "tests/test_main.py::test_find_dotenv_found", "tests/test_main.py::test_load_dotenv_existing_file", "tests/test_main.py::test_load_dotenv_no_file_verbose", "tests/test_main.py::test_load_dotenv_existing_variable_no_override", "tests/test_main.py::test_load_dotenv_existing_variable_override", "tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_no_override", "tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_with_override", "tests/test_main.py::test_load_dotenv_string_io_utf_8", "tests/test_main.py::test_load_dotenv_file_stream", "tests/test_main.py::test_load_dotenv_in_current_dir", "tests/test_main.py::test_dotenv_values_file", "tests/test_main.py::test_dotenv_values_string_io[env0-a=$b-False-expected0]", "tests/test_main.py::test_dotenv_values_string_io[env1-a=$b-True-expected1]", "tests/test_main.py::test_dotenv_values_string_io[env2-a=${b}-False-expected2]", "tests/test_main.py::test_dotenv_values_string_io[env3-a=${b}-True-expected3]", "tests/test_main.py::test_dotenv_values_string_io[env4-a=${b:-d}-False-expected4]", "tests/test_main.py::test_dotenv_values_string_io[env5-a=${b:-d}-True-expected5]", "tests/test_main.py::test_dotenv_values_string_io[env6-b=c\\na=${b}-True-expected6]", "tests/test_main.py::test_dotenv_values_string_io[env7-a=${b}-True-expected7]", "tests/test_main.py::test_dotenv_values_string_io[env8-a=${b:-d}-True-expected8]", "tests/test_main.py::test_dotenv_values_string_io[env9-a=\"${b}\"-True-expected9]", "tests/test_main.py::test_dotenv_values_string_io[env10-a='${b}'-True-expected10]", "tests/test_main.py::test_dotenv_values_string_io[env11-a=x${b}y-True-expected11]", "tests/test_main.py::test_dotenv_values_string_io[env12-a=${a}-True-expected12]", "tests/test_main.py::test_dotenv_values_string_io[env13-a=${a}-True-expected13]", "tests/test_main.py::test_dotenv_values_string_io[env14-a=${a:-c}-True-expected14]", "tests/test_main.py::test_dotenv_values_string_io[env15-a=${a:-c}-True-expected15]", 
"tests/test_main.py::test_dotenv_values_string_io[env16-a=${b}${b}-True-expected16]", "tests/test_main.py::test_dotenv_values_string_io[env17-b=d\\na=${b}-True-expected17]", "tests/test_main.py::test_dotenv_values_string_io[env18-a=b\\na=c\\nd=${a}-True-expected18]", "tests/test_main.py::test_dotenv_values_string_io[env19-a=b\\nc=${a}\\nd=e\\nc=${d}-True-expected19]", "tests/test_main.py::test_dotenv_values_file_stream" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2022-07-27T07:28:33Z"
bsd-3-clause
theskumar__python-dotenv-52
diff --git a/README.rst b/README.rst index 936a5a2..8b2a039 100644 --- a/README.rst +++ b/README.rst @@ -126,7 +126,8 @@ update your settings on remote server, handy isn't it! file in current working directory. -q, --quote [always|never|auto] Whether to quote or not the variable values. - Default mode is always. + Default mode is always. This does not affect + parsing. --help Show this message and exit. Commands: diff --git a/dotenv/cli.py b/dotenv/cli.py index 9a99314..125a0a8 100644 --- a/dotenv/cli.py +++ b/dotenv/cli.py @@ -11,7 +11,7 @@ from .main import get_key, dotenv_values, set_key, unset_key help="Location of the .env file, defaults to .env file in current working directory.") @click.option('-q', '--quote', default='always', type=click.Choice(['always', 'never', 'auto']), - help="Whether to quote or not the variable values. Default mode is always.") + help="Whether to quote or not the variable values. Default mode is always. This does not affect parsing.") @click.pass_context def cli(ctx, file, quote): '''This script is used to set, get or unset values from a .env file.''' diff --git a/dotenv/main.py b/dotenv/main.py index 2fe1a83..ceac3fa 100644 --- a/dotenv/main.py +++ b/dotenv/main.py @@ -103,7 +103,7 @@ def parse_dotenv(dotenv_path): k, v = k.strip(), v.strip() if len(v) > 0: - quoted = v[0] == v[len(v) - 1] == '"' + quoted = v[0] == v[len(v) - 1] in ['"', "'"] if quoted: v = decode_escaped(v[1:-1])
theskumar/python-dotenv
9552db8d8c25753ec4f1a724f64d895b9daa6296
diff --git a/tests/test_cli.py b/tests/test_cli.py index d78172b..449b54a 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -46,6 +46,18 @@ def test_key_value_without_quotes(): sh.rm(dotenv_path) +def test_value_with_quotes(): + with open(dotenv_path, 'w') as f: + f.write('TEST="two words"\n') + assert dotenv.get_key(dotenv_path, 'TEST') == 'two words' + sh.rm(dotenv_path) + + with open(dotenv_path, 'w') as f: + f.write("TEST='two words'\n") + assert dotenv.get_key(dotenv_path, 'TEST') == 'two words' + sh.rm(dotenv_path) + + def test_unset(): sh.touch(dotenv_path) success, key_to_set, value_to_set = dotenv.set_key(dotenv_path, 'HELLO', 'WORLD') @@ -104,6 +116,13 @@ def test_get_key_with_interpolation(cli): dotenv.set_key(dotenv_path, 'FOO', '${HELLO}') dotenv.set_key(dotenv_path, 'BAR', 'CONCATENATED_${HELLO}_POSIX_VAR') + lines = list(open(dotenv_path, "r").readlines()) + assert lines == [ + 'HELLO="WORLD"\n', + 'FOO="${HELLO}"\n', + 'BAR="CONCATENATED_${HELLO}_POSIX_VAR"\n', + ] + # test replace from variable in file stored_value = dotenv.get_key(dotenv_path, 'FOO') assert stored_value == 'WORLD'
Wrong parsing of env variables in single quotes

I have the following `.env` file:

```
DATABASE_URL='postgres://localhost:5432/myapp_development'
```

When I run `dotenv get DATABASE_URL`, this is what I get: `DATABASE_URL="'postgres://localhost:5432/simulator_development'"`.

When I try to use this with [dj-database-url](https://github.com/kennethreitz/dj-database-url), it fails to parse the `DATABASE_URL` environment variable as-is. It seems using single quotes in the `.env` file causes this. It would be nice if this behavior were documented somewhere, if it is intended; I spent quite a bit of time trying to figure out where the error was. Thanks 😃
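A standalone sketch of the parsing change in the patch above: a value counts as quoted when its first and last characters are the same quote character, single or double. Note the chained comparison `v[0] == v[-1] in ['"', "'"]` expands to `v[0] == v[-1] and v[-1] in ['"', "'"]`.

```python
def strip_quotes(v: str) -> str:
    # v[0] == v[-1] in ['"', "'"] chains to:
    # v[0] == v[-1] and v[-1] in ['"', "'"]
    if len(v) > 1 and v[0] == v[-1] in ['"', "'"]:
        return v[1:-1]
    return v


assert strip_quotes("'postgres://localhost:5432/myapp_development'") \
    == "postgres://localhost:5432/myapp_development"
assert strip_quotes('"two words"') == "two words"
assert strip_quotes("plain") == "plain"
```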
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_cli.py::test_value_with_quotes" ]
[ "tests/test_cli.py::test_get_key", "tests/test_cli.py::test_list", "tests/test_cli.py::test_key_value_without_quotes", "tests/test_cli.py::test_unset", "tests/test_cli.py::test_console_script", "tests/test_cli.py::test_get_key_with_interpolation", "tests/test_cli.py::test_get_key_with_interpolation_of_unset_variable" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2017-03-30T09:50:38Z"
bsd-3-clause
thinkingmachines__geomancer-48
diff --git a/geomancer/spells/__init__.py b/geomancer/spells/__init__.py index 92e6755..501b5d4 100644 --- a/geomancer/spells/__init__.py +++ b/geomancer/spells/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- from .distance_to_nearest import DistanceToNearest +from .length_of import LengthOf from .number_of import NumberOf - -__all__ = ["DistanceToNearest", "NumberOf"] +__all__ = ["DistanceToNearest", "NumberOf", "LengthOf"] diff --git a/geomancer/spells/length_of.py b/geomancer/spells/length_of.py new file mode 100644 index 0000000..2bb308f --- /dev/null +++ b/geomancer/spells/length_of.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- + +# Import modules +from sqlalchemy import func +from sqlalchemy.sql import select + +from .base import Spell +from ..backend.cores.bq import BigQueryCore + +from loguru import logger + + +class LengthOf(Spell): + """Obtain the length of all Lines-of-Interest within a certain radius""" + + def __init__(self, on, within=10 * 1000, **kwargs): + """Spell constructor + + Parameters + ---------- + on : str + Feature class to compare upon + within : float, optional + Look for values within a particular range. Its value is in meters, + the default is :code:`10,000` meters. + source_table : str + Table URI to run queries against. + feature_name : str + Column name for the output feature. + column : str, optional + Column to look the geometries from. The default is :code:`WKT` + options : geomancer.Config + Specify configuration for interacting with the database backend. + Default is a BigQuery Configuration + """ + super(LengthOf, self).__init__(**kwargs) + logger.warning( + "ST_Buffer is not yet implemented so BigQueryCore won't work: groups.google.com/d/msg/bq-gis-feedback/Yq4Ku6u2A80/ceVXU01RCgAJ" + ) + self.source_column, self.source_filter = self.extract_columns(on) + self.within = within + + def query(self, source, target, core, column): + # ST_Buffer is not yet implemented so BigQueryCore won't work + # (groups.google.com/d/msg/bq-gis-feedback/Yq4Ku6u2A80/ceVXU01RCgAJ) + if isinstance(core, BigQueryCore): + raise ValueError( + "The LengthOf feature is currently incompatible with \ + BigQueryCore because ST_Buffer is not yet implemented" + ) + + # Get all lines-of-interests (LOIs) of fclass `on` + lois = select( + [source.c[self.source_id], source.c.WKT], + source.c[self.source_column] == self.source_filter, + ).cte("lois") + + # Create a buffer `within` a distance/radius around each centroid. + # The point has to be converted to EPSG:3857 so that meters can be + # used instead of decimal degrees for EPSG:4326. + buff = select( + [ + target, + func.ST_Buffer( + core.ST_GeoFromText(target.c[column]), self.within + ).label("__buffer__"), + ] + ).cte("buff") + + # Clip the LOIs with the buffers then calculate the length of all + # LOIs inside each buffer. 
+ clip = select( + [ + buff, + func.ST_Intersection( + core.ST_GeoFromText(lois.c.WKT), + func.ST_Transform(buff.c["__buffer__"], 4326), + ).label("__geom__"), + func.ST_Length( + func.ST_Intersection( + func.ST_Transform( + core.ST_GeoFromText(lois.c.WKT), 3857 + ), + buff.c["__buffer__"], + ) + ).label("__len__"), + ], + func.ST_Intersects( + core.ST_GeoFromText(lois.c.WKT), + func.ST_Transform(buff.c["__buffer__"], 4326), + ), + ).cte("clip") + + # Sum the length of all LOIs inside each buffer + sum_length = ( + select( + [ + clip.c["__index_level_0__"], + func.sum(clip.c["__len__"]).label(self.feature_name), + ] + ) + .select_from(clip) + .group_by(clip.c["__index_level_0__"]) + .cte("sum_length") + ) + + # Join the sum of the length of all LOIs inside each buffer + query = select( + [ + col + for col in sum_length.columns + if col.key not in ("__len__", "__geom__", "__buffer__") + ], + sum_length.c["__index_level_0__"] == buff.c["__index_level_0__"], + ) + return query
thinkingmachines/geomancer
fbc074eaa9d3e8e7d439da79bcb6fbfd6d0f8ae4
diff --git a/tests/spells/test_length_of.py b/tests/spells/test_length_of.py new file mode 100644 index 0000000..4fd5d72 --- /dev/null +++ b/tests/spells/test_length_of.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- + +# Import modules +import pytest +from google.cloud import bigquery +from tests.spells.base_test_spell import BaseTestSpell, SpellDB + +# Import from package +from geomancer.backend.settings import SQLiteConfig +from geomancer.spells import LengthOf + +params = [ + SpellDB( + spell=LengthOf( + on="residential", + within=50, + source_table="gis_osm_roads_free_1", + feature_name="len_residential", + options=SQLiteConfig(), + ), + dburl="sqlite:///tests/data/source.sqlite", + ) +] + + [email protected] +class TestLengthOf(BaseTestSpell): + @pytest.fixture(params=params, ids=["roads-sqlite"]) + def spelldb(self, request): + return request.param
Add LengthOf function The `LengthOf` function would compute the length of all line features within a circular `radius` centered at a given `lat`/`lon` location.
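A usage sketch mirroring the test added in the patch above (the table name, sample-points fixture, and database path all come from the test suite):

```python
import pandas as pd

from geomancer.backend.settings import SQLiteConfig
from geomancer.spells import LengthOf

spell = LengthOf(
    on="residential",                     # fclass of the lines of interest
    within=50,                            # buffer radius in meters
    source_table="gis_osm_roads_free_1",
    feature_name="len_residential",
    options=SQLiteConfig(),
)
df = pd.read_csv("tests/data/sample_points.csv")  # fixture used by the tests
df_with_features = spell.cast(df, dburl="sqlite:///tests/data/source.sqlite")
```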
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/spells/test_length_of.py::TestLengthOf::test_extract_columns_return_values[roads-sqlite-fclass:embassy]", "tests/spells/test_length_of.py::TestLengthOf::test_extract_columns_return_values[roads-sqlite-embassy]" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files" ], "has_test_patch": true, "is_lite": false }
"2019-03-22T12:37:57Z"
mit
thinkingmachines__geomancer-49
diff --git a/geomancer/backend/settings.py b/geomancer/backend/settings.py index c23a36e..6a43bad 100644 --- a/geomancer/backend/settings.py +++ b/geomancer/backend/settings.py @@ -45,7 +45,7 @@ class BQConfig(Config): @property def name(self): - return "bq" + return "bigquery" DATASET_ID = "geomancer" EXPIRY = 3 diff --git a/geomancer/spellbook/__init__.py b/geomancer/spellbook/__init__.py new file mode 100644 index 0000000..b27808e --- /dev/null +++ b/geomancer/spellbook/__init__.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- + +"""A :code:`SpellBook` is a collection of spells that can be sequentially casted and +merged in a single dataframe + + >>> from geomancer.spells import DistanceOf, NumberOf + >>> from geomancer.spellbook import SpellBook + >>> spellbook = SpellBook( + spells=[ + DistanceOf(...), + NumberOf(...), + ], + ) + >>> df = ... + >>> df_with_features = spellbook.cast(df) + +:code:`SpellBook`s can be distributed by exporting them to JSON files. + + >>> spellbook.author = "My Name" + >>> spellbook.description = "My Features" + >>> spellbook.to_json("my_features.json") + +Now other people can easily reuse your feature extractions in with their own datasets! + + >>> spellbook = SpellBook.read_json("my_features.json") + >>> my_df = ... + >>> my_df_with_features = spellbook.cast(my_df) +""" + +# Import standard library +import importlib +import json + +# Import modules +import pandas as pd + + +class SpellBook(object): + def __init__(self, spells, column="WKT", author=None, description=None): + """SpellBook constructor + + Parameters + ---------- + spells : list of :class:`geomancer.spells.Spell` + List of spell instances. + column : str, optional + Column to look the geometries from. The default is :code:`WKT` + author : str, optional + Author of the spell book + description : str, optional + Description of the spell book + """ + self.column = column + self.spells = spells + self.author = author + self.description = description + + def cast(self, df): + """Runs the cast method of each spell in the spell book + + Parameters + ---------- + df : pandas.DataFrame + Dataframe containing the points to compare upon. By default, we + will look into the :code:`geometry` column. You can specify your + own column by passing an argument to the :code:`column` parameter. + + Returns + ------- + pandas.DataFrame + Output dataframe with the features from all spells + """ + for spell in self.spells: + df = df.join( + spell.cast( + df, column=self.column, features_only=True + ).set_index("__index_level_0__") + ) + return df + + def to_json(self, filename=None, **kwargs): + """Exports spell book as a JSON string + + Parameters + ---------- + filename : str, optional + Output filename. 
If none is given, output is returned + + Returns + ------- + str or None + Export of spell book in JSON format + """ + obj = { + **self.__dict__, + "spells": [ + { + **s.__dict__, + "module": type(s).__module__, + "type": type(s).__name__, + } + for s in self.spells + ], + } + if filename: + with open(filename, "w") as f: + json.dump(obj, f, **kwargs) + else: + return json.dumps(obj, **kwargs) + + @classmethod + def _instantiate_spells(cls, spells): + for spell in spells: + mod = importlib.import_module(spell.pop("module")) + spell_cls = getattr(mod, spell.pop("type")) + on = "{}:{}".format( + spell.pop("source_column"), spell.pop("source_filter") + ) + yield spell_cls(on, **spell) + + @classmethod + def read_json(cls, filename): + """Reads a JSON exported spell book + + Parameters + ---------- + filename : str + Filename of JSON file to read. + + Returns + ------- + :class:`geomancer.spellbook.SpellBook` + :code:`SpellBook` instance parsed from given JSON file. + """ + with open(filename) as f: + obj = json.load(f) + obj["spells"] = cls._instantiate_spells(obj.pop("spells")) + return cls(**obj) diff --git a/geomancer/spells/base.py b/geomancer/spells/base.py index 34cd0e3..c82854c 100644 --- a/geomancer/spells/base.py +++ b/geomancer/spells/base.py @@ -37,8 +37,8 @@ class Spell(abc.ABC): self, source_table, feature_name, - column="WKT", source_id="osm_id", + dburl=None, options=None, ): """Spell constructor @@ -49,17 +49,17 @@ class Spell(abc.ABC): Table URI to run queries against. feature_name : str Column name for the output feature. - column : str, optional - Column to look the geometries from. The default is :code:`WKT` + dburl : str, optional + Database url used to configure backend connection options : geomancer.Config, optional Specify configuration for interacting with the database backend. Auto-detected if not set. """ self.source_table = source_table self.feature_name = feature_name - self.options = options - self.column = column self.source_id = source_id + self.dburl = dburl + self.options = options def extract_columns(self, x): """Spell constructor @@ -120,8 +120,22 @@ class Spell(abc.ABC): """ raise NotImplementedError - @logger.catch - def cast(self, df, dburl): + def _include_column(self, col, keep_index, features_only): + if features_only: + return col.key in ("__index_level_0__", self.feature_name) + if keep_index: + return True + return col.key != "__index_level_0__" + + @logger.catch(reraise=True) + def cast( + self, + df, + dburl=None, + column="WKT", + keep_index=False, + features_only=False, + ): """Apply the feature transform to an input pandas.DataFrame If using BigQuery, a :code:`google.cloud.client.Client` @@ -133,14 +147,28 @@ class Spell(abc.ABC): Dataframe containing the points to compare upon. By default, we will look into the :code:`geometry` column. You can specify your own column by passing an argument to the :code:`column` parameter. - dburl : str + dburl : str, optional Database url used to configure backend connection + column : str, optional + Column to look the geometries from. The default is :code:`WKT` + keep_index : boolean, optional + Include index in output dataframe + features_only : boolean, optional + Only return features as output dataframe. Automatically sets + :code:`keep_index` to :code:`True`. 
Returns ------- pandas.DataFrame Output dataframe with the features per given point """ + dburl = dburl or self.dburl + if not dburl: + raise ValueError("dburl was not supplied") + + if features_only: + keep_index = True + core = self.get_core(dburl) # Get engine @@ -152,11 +180,15 @@ class Spell(abc.ABC): ) # Build query - query = self.query(source, target, core) + query = self.query(source, target, core, column) - # Remove temporary index column + # Filter output columns query = select( - [col for col in query.columns if col.key != "__index_level_0__"] + [ + col + for col in query.columns + if self._include_column(col, keep_index, features_only) + ] ).select_from(query) # Perform query diff --git a/geomancer/spells/distance_to_nearest.py b/geomancer/spells/distance_to_nearest.py index 707310e..df53544 100644 --- a/geomancer/spells/distance_to_nearest.py +++ b/geomancer/spells/distance_to_nearest.py @@ -26,15 +26,15 @@ class DistanceToNearest(Spell): Column name for the output feature. column : str, optional Column to look the geometries from. The default is :code:`WKT` - options : geomancer.Config + options : geomancer.Config, optional Specify configuration for interacting with the database backend. - Default is a BigQuery Configuration + Auto-detected if not set. """ super(DistanceToNearest, self).__init__(**kwargs) self.source_column, self.source_filter = self.extract_columns(on) self.within = within - def query(self, source, target, core): + def query(self, source, target, core, column): # Get all POIs of fclass `on` pois = select( [source.c[self.source_id], source.c.WKT], @@ -42,7 +42,7 @@ class DistanceToNearest(Spell): ).cte("pois") # Compute the distance from `column` to each POI within given distance distance = func.ST_Distance( - core.ST_GeoFromText(target.c[self.column]), + core.ST_GeoFromText(target.c[column]), core.ST_GeoFromText(pois.c.WKT), ) pairs = ( diff --git a/geomancer/spells/number_of.py b/geomancer/spells/number_of.py index 3865177..b42e57b 100644 --- a/geomancer/spells/number_of.py +++ b/geomancer/spells/number_of.py @@ -26,15 +26,15 @@ class NumberOf(Spell): Column name for the output feature. column : str, optional Column to look the geometries from. The default is :code:`WKT` - options : geomancer.Config + options : geomancer.Config, optional Specify configuration for interacting with the database backend. - Default is a BigQuery Configuration + Auto-detected if not set. """ super(NumberOf, self).__init__(**kwargs) self.source_column, self.source_filter = self.extract_columns(on) self.within = within - def query(self, source, target, core): + def query(self, source, target, core, column): # Get all POIs of fclass `on` pois = select( [source.c[self.source_id], source.c.WKT], @@ -42,7 +42,7 @@ class NumberOf(Spell): ).cte("pois") # Compute the distance from `column` to each POI within given distance distance = func.ST_Distance( - core.ST_GeoFromText(target.c[self.column]), + core.ST_GeoFromText(target.c[column]), core.ST_GeoFromText(pois.c.WKT), ) pairs = (
thinkingmachines/geomancer
1e836d8cddb8c8c1f958775cb27a4a1e9e06523d
diff --git a/tests/conftest.py b/tests/conftest.py index 5953f3f..65193fd 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,3 +10,9 @@ def sample_points(): """Return a set of POINTS in a pandas.DataFrame""" df = pd.read_csv("tests/data/sample_points.csv") return df + + +@pytest.fixture +def spellbook_json(): + with open("tests/data/spellbook.json") as f: + return f.read() diff --git a/tests/data/spellbook.json b/tests/data/spellbook.json new file mode 100644 index 0000000..80f4339 --- /dev/null +++ b/tests/data/spellbook.json @@ -0,0 +1,1 @@ +{"column": "WKT", "spells": [{"source_table": "gis_osm_pois_free_1", "feature_name": "dist_supermarket", "source_id": "osm_id", "dburl": "sqlite:///tests/data/source.sqlite", "options": null, "source_column": "fclass", "source_filter": "supermarket", "within": 10000, "module": "geomancer.spells.distance_to_nearest", "type": "DistanceToNearest"}, {"source_table": "gis_osm_pois_free_1", "feature_name": "num_embassy", "source_id": "osm_id", "dburl": "sqlite:///tests/data/source.sqlite", "options": null, "source_column": "fclass", "source_filter": "embassy", "within": 10000, "module": "geomancer.spells.number_of", "type": "NumberOf"}], "author": null, "description": null} \ No newline at end of file diff --git a/tests/spellbook/test_spellbook.py b/tests/spellbook/test_spellbook.py new file mode 100644 index 0000000..ca65cea --- /dev/null +++ b/tests/spellbook/test_spellbook.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- + +# Import modules +import pandas as pd +import pytest + +# Import from package +from geomancer.spellbook import SpellBook +from geomancer.spells import DistanceToNearest, NumberOf + + +@pytest.mark.usefixtures("sample_points") +def test_spell_dburl(sample_points): + with pytest.raises(ValueError, match="dburl was not supplied"): + spell = DistanceToNearest( + on="embassy", + source_table="gis_osm_pois_free_1", + feature_name="dist_embassy", + ) + spell.cast(sample_points) + + +@pytest.mark.usefixtures("sample_points") +def test_spell_keep_index(sample_points): + spell = DistanceToNearest( + on="embassy", + source_table="gis_osm_pois_free_1", + feature_name="dist_embassy", + ) + df = spell.cast( + sample_points, + dburl="sqlite:///tests/data/source.sqlite", + keep_index=True, + ) + assert "__index_level_0__" in df.columns + df = spell.cast( + sample_points, + dburl="sqlite:///tests/data/source.sqlite", + keep_index=False, + ) + assert "__index_level_0__" not in df.columns + + +@pytest.mark.usefixtures("sample_points") +def test_spell_features_only(sample_points): + spell = DistanceToNearest( + on="embassy", + source_table="gis_osm_pois_free_1", + feature_name="dist_embassy", + ) + df = spell.cast( + sample_points, + dburl="sqlite:///tests/data/source.sqlite", + features_only=True, + ) + assert ["__index_level_0__", "dist_embassy"] == df.columns.tolist() + + +@pytest.fixture +def spellbook(): + return SpellBook( + [ + DistanceToNearest( + "supermarket", + source_table="gis_osm_pois_free_1", + feature_name="dist_supermarket", + dburl="sqlite:///tests/data/source.sqlite", + ), + NumberOf( + on="embassy", + source_table="gis_osm_pois_free_1", + feature_name="num_embassy", + dburl="sqlite:///tests/data/source.sqlite", + ), + ] + ) + + +@pytest.mark.usefixtures("spellbook", "sample_points") +def test_spellbook_spells(spellbook, sample_points): + df = spellbook.cast(sample_points) + assert "dist_supermarket" in df.columns + assert "num_embassy" in df.columns + + +@pytest.mark.usefixtures("spellbook", "spellbook_json") +def test_spellbook_to_json(spellbook, spellbook_json): + assert spellbook.to_json() == spellbook_json + + +@pytest.mark.usefixtures("spellbook", "spellbook_json") +def test_spellbook_to_json_file(spellbook, spellbook_json, tmpdir): + filename = "spellbook.json" + f = tmpdir.mkdir(__name__).join(filename) + spellbook.to_json(f.strpath) + f.read() == spellbook_json + + +@pytest.mark.usefixtures("spellbook", "spellbook_json") +def test_spellbook_read_json(spellbook, spellbook_json, tmpdir): + filename = "spellbook.json" + f = tmpdir.mkdir(__name__).join(filename) + f.write(spellbook_json) + _spellbook = SpellBook.read_json(f.strpath) + assert _spellbook.column == spellbook.column + assert _spellbook.author == spellbook.author + assert _spellbook.description == spellbook.description + for i, spell in enumerate(_spellbook.spells): + assert spell.__dict__ == spellbook.spells[i].__dict__ diff --git a/tests/spells/base_test_spell.py b/tests/spells/base_test_spell.py index c8d0e3c..ec67e14 100644 --- a/tests/spells/base_test_spell.py +++ b/tests/spells/base_test_spell.py @@ -38,7 +38,9 @@ class BaseTestSpell: engine=engine, ) # Perform the test - query = spelldb.spell.query(source=source, target=target, core=core) + query = spelldb.spell.query( + source=source, target=target, core=core, column="WKT" + ) assert isinstance(query, ClauseElement) @pytest.mark.usefixtures("spelldb", "sample_points") diff --git a/tests/spells/test_distance_to_nearest.py b/tests/spells/test_distance_to_nearest.py index 43106e7..1bde1ed 100644 --- a/tests/spells/test_distance_to_nearest.py +++ b/tests/spells/test_distance_to_nearest.py @@ -16,13 +16,16 @@ params = [ ), dburl="sqlite:///tests/data/source.sqlite", ), - SpellDB( - spell=DistanceToNearest( - on="primary", - source_table="gis_osm_roads_free_1", - feature_name="dist_primary", + pytest.param( + SpellDB( + spell=DistanceToNearest( + on="primary", + source_table="gis_osm_roads_free_1", + feature_name="dist_primary", + ), + dburl="sqlite:///tests/data/source.sqlite", ), - dburl="sqlite:///tests/data/source.sqlite", + marks=pytest.mark.slow, ), pytest.param( SpellDB( diff --git a/tests/spells/test_number_of.py b/tests/spells/test_number_of.py index 9680f5e..2371e37 100644 --- a/tests/spells/test_number_of.py +++ b/tests/spells/test_number_of.py @@ -17,13 +17,16 @@ params = [ ), dburl="sqlite:///tests/data/source.sqlite", ), - SpellDB( - spell=NumberOf( - on="primary", - source_table="gis_osm_roads_free_1", - feature_name="num_primary", + pytest.param( + SpellDB( + spell=NumberOf( + on="primary", + source_table="gis_osm_roads_free_1", + feature_name="num_primary", + ), + dburl="sqlite:///tests/data/source.sqlite", ), - dburl="sqlite:///tests/data/source.sqlite", + marks=pytest.mark.slow, ), pytest.param( SpellDB(
Add SpellBook

Usage ideas:

```python
from geomancer import SpellBook
from geomancer.spells import DistanceToNearest
from google.cloud import bigquery

# When you want to register spells
my_spellbook = SpellBook([
    DistanceToNearest("embassy", within=10000, source_table="tm-geospatial.ph_osm.pois"),  # From BQ
    DistanceToNearest("hospital", within=5000, source_table="pois"),  # From Spatialite
])

# You can then do multiple casts
my_features = my_spellbook.cast(df, host=[bigquery.Client(), "tests/data/source.sqlite"])

# Saving the SpellBook
my_spellbook.author = "Lj Miranda"  # optional
my_spellbook.description = "Some cool features for other stuff"  # optional
my_spellbook.to_json("path/to/my/own/features.json")
```

Some potential challenges:

- It is possible to create a spellbook with spells coming from different warehouses (one feature from BQ, another from SQLite, etc.). However, setting the `source_table` and the `host` is decoupled (one during init, another during `cast()`).
- Concatenating everything inside a dataframe (similar output column names, etc.). We should probably do some validation before the concat.

Some preliminary tasks:

- Write down all possible metadata to include: things that are automatically generated (date? unique ID? etc.) and those that are manually set.
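The `spellbook.json` fixture and the `read_json` test in the patch above imply the serialization scheme that eventually shipped: each spell is stored with its parameters plus a `module` path and `type` name so it can be re-imported on load. Below is a minimal sketch of that round-trip; the class body is a guess at the shape of the implementation, and it assumes spell attributes map one-to-one onto constructor keyword arguments, which the real geomancer code may not guarantee.

```python
import importlib
import json


class SpellBook:
    """Sketch of a registry that serializes spells for later re-instantiation."""

    def __init__(self, spells, column="WKT", author=None, description=None):
        self.column = column
        self.spells = spells
        self.author = author
        self.description = description

    def to_json(self, filename=None):
        obj = {
            "column": self.column,
            # Each spell is stored with its module path and class name so
            # read_json() knows which class to rebuild.
            "spells": [
                {**vars(spell),
                 "module": type(spell).__module__,
                 "type": type(spell).__name__}
                for spell in self.spells
            ],
            "author": self.author,
            "description": self.description,
        }
        if filename is None:
            return json.dumps(obj)
        with open(filename, "w") as f:
            json.dump(obj, f)

    @classmethod
    def read_json(cls, filename):
        with open(filename) as f:
            obj = json.load(f)
        spells = []
        for spec in obj["spells"]:
            # Re-import the recorded class and rebuild the spell from the
            # remaining keys (assumes attributes == __init__ kwargs).
            module = importlib.import_module(spec.pop("module"))
            spells.append(getattr(module, spec.pop("type"))(**spec))
        return cls(spells, column=obj["column"],
                   author=obj["author"], description=obj["description"])
```

Storing the import path next to the parameters keeps the JSON self-describing, which is what lets `read_json` rebuild `DistanceToNearest` and `NumberOf` objects without a hand-written registry.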
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/spellbook/test_spellbook.py::test_spell_dburl", "tests/spellbook/test_spellbook.py::test_spellbook_to_json", "tests/spellbook/test_spellbook.py::test_spellbook_to_json_file", "tests/spellbook/test_spellbook.py::test_spellbook_read_json", "tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[pois-sqlite-fclass:embassy]", "tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[pois-sqlite-embassy]", "tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[roads-sqlite-fclass:embassy]", "tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[roads-sqlite-embassy]", "tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[pois-bq-fclass:embassy]", "tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[pois-bq-embassy]", "tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[roads-bq-fclass:embassy]", "tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[roads-bq-embassy]", "tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[pois-sqlite-fclass:embassy]", "tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[pois-sqlite-embassy]", "tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[roads-sqlite-fclass:embassy]", "tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[roads-sqlite-embassy]", "tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[pois-bq-fclass:embassy]", "tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[pois-bq-embassy]", "tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[roads-bq-fclass:embassy]", "tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[roads-bq-embassy]" ]
[]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2019-03-22T12:41:10Z"
mit
thp__urlwatch-543
diff --git a/CHANGELOG.md b/CHANGELOG.md index 12ad2d1..ec27cfb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,8 @@ The format mostly follows [Keep a Changelog](http://keepachangelog.com/en/1.0.0/ - Unit tests have been migrated from `nose` to `pytest` and moved from `test/` to `lib/urlwatch/tests/` - The ``css`` and ``xpath`` filters now accept ``skip`` and ``maxitems`` as subfilter +- The ``shellpipe`` filter now inherits all environment variables (e.g. ``$PATH``) + of the ``urlwatch`` process ### Fixed diff --git a/lib/urlwatch/filters.py b/lib/urlwatch/filters.py index 390ac09..b11a3bb 100644 --- a/lib/urlwatch/filters.py +++ b/lib/urlwatch/filters.py @@ -788,10 +788,12 @@ class ShellPipeFilter(FilterBase): encoding = sys.getdefaultencoding() - env = { + # Work on a copy to not modify the outside environment + env = dict(os.environ) + env.update({ 'URLWATCH_JOB_NAME': self.job.pretty_name() if self.job else '', 'URLWATCH_JOB_LOCATION': self.job.get_location() if self.job else '', - } + }) return subprocess.check_output(subfilter['command'], shell=True, input=data.encode(encoding), env=env).decode(encoding)
thp/urlwatch
65507c55ff5f467687d4bef4ca4d99db55dce24a
diff --git a/lib/urlwatch/tests/test_filters.py b/lib/urlwatch/tests/test_filters.py index 80465f3..ca85120 100644 --- a/lib/urlwatch/tests/test_filters.py +++ b/lib/urlwatch/tests/test_filters.py @@ -71,3 +71,20 @@ def test_providing_subfilter_to_filter_without_subfilter_raises_valueerror(): def test_providing_unknown_subfilter_raises_valueerror(): with pytest.raises(ValueError): list(FilterBase.normalize_filter_list([{'grep': {'re': 'Price: .*', 'anothersubfilter': '42'}}])) + + +def test_shellpipe_inherits_environment_but_does_not_modify_it(): + # https://github.com/thp/urlwatch/issues/541 + + # Set a specific value to check it doesn't overwrite the current env + os.environ['URLWATCH_JOB_NAME'] = 'should-not-be-overwritten' + + # See if the shellpipe process can use a variable from the outside + os.environ['INHERITED_FROM'] = 'parent-process' + filtercls = FilterBase.__subclasses__.get('shellpipe') + result = filtercls(None, None).filter('input-string', {'command': 'echo "$INHERITED_FROM/$URLWATCH_JOB_NAME"'}) + # Check that the inherited value and the job name is set properly + assert result == 'parent-process/\n' + + # Check that outside the variable wasn't overwritten by the filter + assert os.environ['URLWATCH_JOB_NAME'] == 'should-not-be-overwritten'
The shellpipe filter is removing all environment variables

The shellpipe filter is removing all environment variables, then adding URLWATCH_JOB_NAME and URLWATCH_JOB_LOCATION. I believe the code in filters.py should just be adding them to the existing environment.

As a result (simple example shown below), the "sh" that's forked can run programs in system directories only because of fallback path-search behavior. Under Debian, where /bin/sh is a symlink to /bin/dash, an strace shows that "sh" searches /usr/local/{s,}bin, /usr/{s,}bin and /{s,}bin. urlwatch fails to find the shellpipe command when it's in ~/bin and isn't specified with its pathname. Specifying a "diff_tool" does find the command if it's in my ~/bin and doesn't empty the environment.

I'm using v2.19 as installed by "pip3" (not the latest code on Github, though the env. handling appears to be the same).

% env | grep PATH
PATH=/home/stosh/bin:/usr/local/bin:/usr/bin:/bin
% cat date_cat.yaml
name: watchdog-cat
command: "date"
filter:
  - shellpipe: "env >/dev/tty; cat"
% cat date_mycat.yaml
name: watchdog-mycat
command: "date"
filter:
  - shellpipe: "env >/dev/tty; mycat"
% cmp /bin/cat ~/bin/mycat
% urlwatch --cache date.db --config ~/.config/urlwatch/urlwatch_orig.yaml --urls date_cat.yaml
URLWATCH_JOB_NAME=watchdog-cat
URLWATCH_JOB_LOCATION=date
PWD=/home/stosh
===========================================================================
01. CHANGED: watchdog-cat
===========================================================================

---------------------------------------------------------------------------
CHANGED: watchdog-cat ( date )
---------------------------------------------------------------------------
--- @ Tue, 28 Jul 2020 17:55:41 -0400
+++ @ Tue, 28 Jul 2020 17:56:27 -0400
@@ -1 +1 @@
-Tue Jul 28 17:55:41 EDT 2020
+Tue Jul 28 17:56:27 EDT 2020
---------------------------------------------------------------------------

--
urlwatch 2.19, Copyright 2008-2020 Thomas Perl
Website: https://thp.io/2008/urlwatch/
watched 1 URLs in 0 seconds

% urlwatch --cache date.db --config ~/.config/urlwatch/urlwatch_orig.yaml --urls date_mycat.yaml
URLWATCH_JOB_NAME=watchdog-mycat
URLWATCH_JOB_LOCATION=date
PWD=/home/stosh
/bin/sh: 1: mycat: not found
===========================================================================
01. ERROR: watchdog-mycat
===========================================================================

---------------------------------------------------------------------------
ERROR: watchdog-mycat ( date )
---------------------------------------------------------------------------
Traceback (most recent call last):
  File "/home/stosh/.local/lib/python3.7/site-packages/urlwatch/handler.py", line 92, in process
    data = FilterBase.process(filter_kind, subfilter, self, data)
  File "/home/stosh/.local/lib/python3.7/site-packages/urlwatch/filters.py", line 145, in process
    return filtercls(state.job, state).filter(data, subfilter)
  File "/home/stosh/.local/lib/python3.7/site-packages/urlwatch/filters.py", line 784, in filter
    input=data.encode(encoding), env=env).decode(encoding)
  File "/usr/lib/python3.7/subprocess.py", line 395, in check_output
    **kwargs).stdout
  File "/usr/lib/python3.7/subprocess.py", line 487, in run
    output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command 'env >/dev/tty; mycat' returned non-zero exit status 127.
---------------------------------------------------------------------------

--
urlwatch 2.19, Copyright 2008-2020 Thomas Perl
Website: https://thp.io/2008/urlwatch/
watched 1 URLs in 0 seconds
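The fix in the patch above copies `os.environ` before layering on the job variables. The difference is easy to demonstrate outside urlwatch with a few lines of Python on a POSIX system; the variable values here are made up for illustration:

```python
import os
import subprocess


def run(env):
    # Echo the PATH the child shell actually sees.
    return subprocess.check_output("echo PATH=$PATH", shell=True, env=env).decode()


# A fresh dict replaces the child's entire environment: $PATH comes back
# empty, so commands are only found through the shell's fallback search.
print(run({"URLWATCH_JOB_NAME": "demo"}))  # -> "PATH=\n"

# Copying os.environ first inherits $PATH while still allowing additions,
# and the copy leaves the parent process's environment untouched.
env = dict(os.environ)
env.update({"URLWATCH_JOB_NAME": "demo"})
print(run(env))  # -> the full inherited PATH
```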
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "lib/urlwatch/tests/test_filters.py::test_shellpipe_inherits_environment_but_does_not_modify_it" ]
[ "lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[grep-output0]", "lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[grep:foo-output1]", "lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[beautify,grep:foo,html2text-output2]", "lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[re.sub:.*-output3]", "lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[re.sub-output4]", "lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[input5-output5]", "lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[input6-output6]", "lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[input7-output7]", "lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[input8-output8]", "lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[input9-output9]", "lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[input10-output10]", "lib/urlwatch/tests/test_filters.py::test_filters[element_by_tag-test_data0]", "lib/urlwatch/tests/test_filters.py::test_filters[element_by_tag_nested-test_data1]", "lib/urlwatch/tests/test_filters.py::test_filters[element_by_id-test_data2]", "lib/urlwatch/tests/test_filters.py::test_filters[element_by_class-test_data3]", "lib/urlwatch/tests/test_filters.py::test_filters[xpath_elements-test_data4]", "lib/urlwatch/tests/test_filters.py::test_filters[xpath_text-test_data5]", "lib/urlwatch/tests/test_filters.py::test_filters[xpath_exclude-test_data6]", "lib/urlwatch/tests/test_filters.py::test_filters[xpath_xml_namespaces-test_data7]", "lib/urlwatch/tests/test_filters.py::test_filters[grep-test_data11]", "lib/urlwatch/tests/test_filters.py::test_filters[grep_with_comma-test_data12]", "lib/urlwatch/tests/test_filters.py::test_filters[json_format-test_data13]", "lib/urlwatch/tests/test_filters.py::test_filters[json_format_subfilter-test_data14]", "lib/urlwatch/tests/test_filters.py::test_filters[sha1-test_data15]", "lib/urlwatch/tests/test_filters.py::test_filters[hexdump-test_data16]", "lib/urlwatch/tests/test_filters.py::test_filters[sort-test_data17]", "lib/urlwatch/tests/test_filters.py::test_filters[sort_paragraphs-test_data18]", "lib/urlwatch/tests/test_filters.py::test_filters[sort_separator_reverse-test_data19]", "lib/urlwatch/tests/test_filters.py::test_filters[sort_reverse-test_data20]", "lib/urlwatch/tests/test_filters.py::test_filters[reverse_lines-test_data21]", "lib/urlwatch/tests/test_filters.py::test_filters[reverse_separator_dict-test_data22]", "lib/urlwatch/tests/test_filters.py::test_filters[reverse_separator_str-test_data23]", "lib/urlwatch/tests/test_filters.py::test_filters[reverse_separator_paragraph-test_data24]", "lib/urlwatch/tests/test_filters.py::test_filters[re_sub_multiline-test_data25]", "lib/urlwatch/tests/test_filters.py::test_invalid_filter_name_raises_valueerror", "lib/urlwatch/tests/test_filters.py::test_providing_subfilter_to_filter_without_subfilter_raises_valueerror", "lib/urlwatch/tests/test_filters.py::test_providing_unknown_subfilter_raises_valueerror" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2020-07-29T15:43:24Z"
bsd-3-clause
thp__urlwatch-785
diff --git a/CHANGELOG.md b/CHANGELOG.md index dbf2441..9e96ff9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,10 @@ The format mostly follows [Keep a Changelog](http://keepachangelog.com/en/1.0.0/ ## UNRELEASED +### Added + +- New `enabled` option for all jobs. Set to false to disable a job without needing to remove it or comment it out (Requested in #625 by snowman, contributed in #785 by jamstah) + ### Changed - Remove EOL'd Python 3.7 (new minimum requirement is Python 3.8), add Python 3.12 testing @@ -15,6 +19,7 @@ The format mostly follows [Keep a Changelog](http://keepachangelog.com/en/1.0.0/ - Fix documentation for watching Github tags and releases, again (#723) - Fix `--test-reporter` command-line option so `separate` configuration option is no longer ignored when sending test notifications (#772, by marunjar) - Fix line height and dark mode regression (#774 reported by kongomongo, PRs #777 and #778 by trevorshannon) +- Fix compatibility with lxml >= 5 which caused the CSS Selector filter to fail (#783 reported by jamesquilty, PR #786 by jamstah) ## [2.28] -- 2023-05-03 diff --git a/docs/source/jobs.rst b/docs/source/jobs.rst index 8c55d58..e2c51f5 100644 --- a/docs/source/jobs.rst +++ b/docs/source/jobs.rst @@ -169,13 +169,14 @@ Optional keys for all job types - ``name``: Human-readable name/label of the job - ``filter``: :doc:`filters` (if any) to apply to the output (can be tested with ``--test-filter``) -- ``max_tries``: Number of times to retry fetching the resource +- ``max_tries``: After this many sequential failed runs, the error will be reported rather than ignored - ``diff_tool``: Command to a custom tool for generating diff text - ``diff_filter``: :doc:`filters` (if any) to apply to the diff result (can be tested with ``--test-diff-filter``) - ``treat_new_as_changed``: Will treat jobs that don't have any historic data as ``CHANGED`` instead of ``NEW`` (and create a diff for new jobs) - ``compared_versions``: Number of versions to compare for similarity - ``kind`` (redundant): Either ``url``, ``shell`` or ``browser``. Automatically derived from the unique key (``url``, ``command`` or ``navigate``) of the job type - ``user_visible_url``: Different URL to show in reports (e.g. when watched URL is a REST API URL, and you want to show a webpage) +- ``enabled``: Can be set to false to disable an individual job (default is ``true``) Setting keys for all jobs at once diff --git a/lib/urlwatch/filters.py b/lib/urlwatch/filters.py index 7b7c95b..ed21b4c 100644 --- a/lib/urlwatch/filters.py +++ b/lib/urlwatch/filters.py @@ -761,9 +761,9 @@ class LxmlParser: excluded_elems = None if self.filter_kind == 'css': selected_elems = CSSSelector(self.expression, - namespaces=self.namespaces).evaluate(root) + namespaces=self.namespaces)(root) excluded_elems = CSSSelector(self.exclude, - namespaces=self.namespaces).evaluate(root) if self.exclude else None + namespaces=self.namespaces)(root) if self.exclude else None elif self.filter_kind == 'xpath': selected_elems = root.xpath(self.expression, namespaces=self.namespaces) excluded_elems = root.xpath(self.exclude, namespaces=self.namespaces) if self.exclude else None diff --git a/lib/urlwatch/jobs.py b/lib/urlwatch/jobs.py index f4db821..d89f41f 100644 --- a/lib/urlwatch/jobs.py +++ b/lib/urlwatch/jobs.py @@ -196,7 +196,7 @@ class JobBase(object, metaclass=TrackSubClasses): class Job(JobBase): __required__ = () - __optional__ = ('name', 'filter', 'max_tries', 'diff_tool', 'compared_versions', 'diff_filter', 'treat_new_as_changed', 'user_visible_url') + __optional__ = ('name', 'filter', 'max_tries', 'diff_tool', 'compared_versions', 'diff_filter', 'enabled', 'treat_new_as_changed', 'user_visible_url') # determine if hyperlink "a" tag is used in HtmlReporter def location_is_url(self): @@ -205,6 +205,9 @@ class Job(JobBase): def pretty_name(self): return self.name if self.name else self.get_location() + def is_enabled(self): + return self.enabled is None or self.enabled + class ShellJob(Job): """Run a shell command and get its standard output""" diff --git a/lib/urlwatch/worker.py b/lib/urlwatch/worker.py index 8a7ea8c..23e710b 100644 --- a/lib/urlwatch/worker.py +++ b/lib/urlwatch/worker.py @@ -55,7 +55,7 @@ def run_jobs(urlwatcher): raise ValueError(f'All job indices must be between 1 and {len(urlwatcher.jobs)}: {urlwatcher.urlwatch_config.joblist}') cache_storage = urlwatcher.cache_storage jobs = [job.with_defaults(urlwatcher.config_storage.config) - for (idx, job) in enumerate(urlwatcher.jobs) if ((idx + 1) in urlwatcher.urlwatch_config.joblist or (not urlwatcher.urlwatch_config.joblist))] + for (idx, job) in enumerate(urlwatcher.jobs) if job.is_enabled() and ((idx + 1) in urlwatcher.urlwatch_config.joblist or (not urlwatcher.urlwatch_config.joblist))] report = urlwatcher.report logger.debug('Processing %d jobs (out of %d)', len(jobs), len(urlwatcher.jobs))
thp/urlwatch
e342af925930114b4194f1bdb660dec6348f653a
diff --git a/lib/urlwatch/tests/data/disabled-job.yaml b/lib/urlwatch/tests/data/disabled-job.yaml new file mode 100644 index 0000000..8b550c3 --- /dev/null +++ b/lib/urlwatch/tests/data/disabled-job.yaml @@ -0,0 +1,6 @@ +name: "1" +url: "|echo job 1" +enabled: false +--- +name: "2" +url: "|echo job 2" diff --git a/lib/urlwatch/tests/test_handler.py b/lib/urlwatch/tests/test_handler.py index 7886acc..8d90cbd 100644 --- a/lib/urlwatch/tests/test_handler.py +++ b/lib/urlwatch/tests/test_handler.py @@ -122,6 +122,27 @@ def test_run_watcher(): cache_storage.close() +def test_disabled_job(): + with teardown_func(): + urls = os.path.join(here, 'data', 'disabled-job.yaml') + config = os.path.join(here, 'data', 'urlwatch.yaml') + cache = os.path.join(here, 'data', 'cache.db') + hooks = '' + + config_storage = YamlConfigStorage(config) + urls_storage = UrlsYaml(urls) + cache_storage = CacheMiniDBStorage(cache) + try: + urlwatch_config = ConfigForTest(config, urls, cache, hooks, True) + + urlwatcher = Urlwatch(urlwatch_config, config_storage, cache_storage, urls_storage) + urlwatcher.run_jobs() + + assert len(urlwatcher.report.job_states) == 1 + finally: + cache_storage.close() + + def test_unserialize_shell_job_without_kind(): job = JobBase.unserialize({ 'name': 'hoho',
Feature request: support optional key `disabled`

- `disabled`: disable watch (default: False)

When set to `True`, do not watch the specified entry.
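As the patch above shows, the feature ultimately shipped as an `enabled` key defaulting to true rather than the requested `disabled` key. The gating pattern is small enough to sketch with a stand-in class (not the real urlwatch `Job`):

```python
# Stand-in Job class showing the pattern from the patch: a job counts as
# enabled unless the key is explicitly set to a false value.
class Job:
    def __init__(self, name, enabled=None):
        self.name = name
        self.enabled = enabled  # None means the key was absent from the YAML

    def is_enabled(self):
        return self.enabled is None or self.enabled


jobs = [Job("1", enabled=False), Job("2")]
runnable = [job for job in jobs if job.is_enabled()]
assert [job.name for job in runnable] == ["2"]
```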
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "lib/urlwatch/tests/test_handler.py::test_disabled_job" ]
[ "lib/urlwatch/tests/test_handler.py::test_required_classattrs_in_subclasses", "lib/urlwatch/tests/test_handler.py::test_save_load_jobs", "lib/urlwatch/tests/test_handler.py::test_load_config_yaml", "lib/urlwatch/tests/test_handler.py::test_load_urls_txt", "lib/urlwatch/tests/test_handler.py::test_load_urls_yaml", "lib/urlwatch/tests/test_handler.py::test_load_hooks_py", "lib/urlwatch/tests/test_handler.py::test_run_watcher", "lib/urlwatch/tests/test_handler.py::test_unserialize_shell_job_without_kind", "lib/urlwatch/tests/test_handler.py::test_unserialize_with_unknown_key", "lib/urlwatch/tests/test_handler.py::test_number_of_tries_in_cache_is_increased", "lib/urlwatch/tests/test_handler.py::test_report_error_when_out_of_tries", "lib/urlwatch/tests/test_handler.py::test_reset_tries_to_zero_when_successful" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-01-17T13:32:34Z"
bsd-3-clause
tianocore__edk2-pytool-extensions-125
diff --git a/edk2toolext/environment/extdeptypes/git_dependency.py b/edk2toolext/environment/extdeptypes/git_dependency.py index 4097716..82ad9b5 100644 --- a/edk2toolext/environment/extdeptypes/git_dependency.py +++ b/edk2toolext/environment/extdeptypes/git_dependency.py @@ -13,7 +13,6 @@ from edk2toolext.environment.external_dependency import ExternalDependency from edk2toolext.environment import repo_resolver from edk2toolext.edk2_git import Repo -from edk2toolext.environment import version_aggregator from edk2toolext.environment import shell_environment @@ -78,7 +77,7 @@ def clean(self): super().clean() # override verify due to different scheme with git - def verify(self, logversion=True): + def verify(self): result = True if not os.path.isdir(self._local_repo_root_path): @@ -104,7 +103,4 @@ def verify(self, logversion=True): result = False self.logger.debug("Verify '%s' returning '%s'." % (self.name, result)) - if(logversion): - version_aggregator.GetVersionAggregator().ReportVersion(self.name, self.version, - version_aggregator.VersionTypes.INFO) return result
tianocore/edk2-pytool-extensions
d122343ac18e896ce802ec22402ad59933f8bff0
diff --git a/edk2toolext/tests/test_git_dependency.py b/edk2toolext/tests/test_git_dependency.py index af6156b..d12ce25 100644 --- a/edk2toolext/tests/test_git_dependency.py +++ b/edk2toolext/tests/test_git_dependency.py @@ -87,7 +87,7 @@ def test_fetch_verify_good_repo_at_top_of_tree(self): ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents ext_dep = GitDependency(ext_dep_descriptor) ext_dep.fetch() - self.assertTrue(ext_dep.verify(logversion=False)) + self.assertTrue(ext_dep.verify()) self.assertEqual(ext_dep.version, uptodate_version) def test_fetch_verify_good_repo_at_not_top_of_tree(self): @@ -98,7 +98,7 @@ def test_fetch_verify_good_repo_at_not_top_of_tree(self): ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents ext_dep = GitDependency(ext_dep_descriptor) ext_dep.fetch() - self.assertTrue(ext_dep.verify(logversion=False)) + self.assertTrue(ext_dep.verify()) self.assertEqual(ext_dep.version, behind_one_version) def test_fetch_verify_non_existant_repo_commit_hash(self): @@ -110,7 +110,7 @@ def test_fetch_verify_non_existant_repo_commit_hash(self): ext_dep = GitDependency(ext_dep_descriptor) ext_dep.fetch() self.assertEqual(ext_dep.version, invalid_version) - self.assertFalse(ext_dep.verify(logversion=False), "Should not verify") + self.assertFalse(ext_dep.verify(), "Should not verify") def test_verify_no_directory(self): ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json") @@ -119,7 +119,7 @@ def test_verify_no_directory(self): ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents ext_dep = GitDependency(ext_dep_descriptor) - self.assertFalse(ext_dep.verify(logversion=False)) + self.assertFalse(ext_dep.verify()) def test_verify_empty_repo_dir(self): ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json") @@ -129,7 +129,7 @@ def test_verify_empty_repo_dir(self): ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents ext_dep = GitDependency(ext_dep_descriptor) os.makedirs(ext_dep._local_repo_root_path, exist_ok=True) - self.assertFalse(ext_dep.verify(logversion=False)) + self.assertFalse(ext_dep.verify()) def test_verify_invalid_git_repo(self): ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json") @@ -141,7 +141,7 @@ def test_verify_invalid_git_repo(self): os.makedirs(ext_dep._local_repo_root_path, exist_ok=True) with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), 'a') as my_file: my_file.write("Test code\n") - self.assertFalse(ext_dep.verify(logversion=False)) + self.assertFalse(ext_dep.verify()) def test_verify_dirty_git_repo(self): ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json") @@ -154,7 +154,7 @@ def test_verify_dirty_git_repo(self): # now write a new file with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), 'a') as my_file: my_file.write("Test code to make repo dirty\n") - self.assertFalse(ext_dep.verify(logversion=False)) + self.assertFalse(ext_dep.verify()) def test_verify_up_to_date(self): ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json") @@ -164,7 +164,7 @@ def test_verify_up_to_date(self): ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents ext_dep = GitDependency(ext_dep_descriptor) ext_dep.fetch() - self.assertTrue(ext_dep.verify(logversion=False)) + self.assertTrue(ext_dep.verify()) def test_verify_down_level_repo(self): ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json") @@ -174,16 +174,16 @@ def test_verify_down_level_repo(self): ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents ext_dep = GitDependency(ext_dep_descriptor) ext_dep.fetch() - self.assertTrue(ext_dep.verify(logversion=False), "Confirm valid ext_dep at one commit behind") + self.assertTrue(ext_dep.verify(), "Confirm valid ext_dep at one commit behind") with open(ext_dep_file_path, "w+") as ext_dep_file: ext_dep_file.write(hw_json_template % uptodate_version) ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents ext_dep = GitDependency(ext_dep_descriptor) - self.assertFalse(ext_dep.verify(logversion=False), "Confirm downlevel repo fails to verify") + self.assertFalse(ext_dep.verify(), "Confirm downlevel repo fails to verify") ext_dep.fetch() - self.assertTrue(ext_dep.verify(logversion=False), "Confirm repo can be updated") + self.assertTrue(ext_dep.verify(), "Confirm repo can be updated") # CLEAN TESTS
Git Dependencies causes error with version aggregator

**Describe the bug**

When cloning a git dependency for the first time, we report the version twice: once before we clone and once after. Since the path before the clone is None, the paths don't match and it throws an error.

**To Reproduce**

Steps to reproduce the behavior:

1. Clone a git ext dep in your tree via stuart_setup
2. See error

**Additional context**

Add any other context about the problem here.
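The patch above fixes this by dropping the `ReportVersion` call from `verify()` entirely, so the version is only reported once. A hypothetical reconstruction of the failure mode follows; the aggregator below is illustrative only, not the real edk2-pytool `version_aggregator` API:

```python
# Illustrative aggregator that rejects re-registration of a name with
# different data -- the behavior the bug report describes.
class VersionAggregator:
    def __init__(self):
        self.versions = {}

    def report_version(self, name, value):
        if name in self.versions and self.versions[name] != value:
            raise RuntimeError(f"conflicting reports for {name!r}: "
                               f"{self.versions[name]!r} vs {value!r}")
        self.versions[name] = value


agg = VersionAggregator()
agg.report_version("my_ext_dep", None)  # verify() before the first clone
try:
    agg.report_version("my_ext_dep", "d122343")  # verify() again after fetch
except RuntimeError as err:
    print(err)
```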
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_fetch_verify_good_repo_at_not_top_of_tree", "edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_fetch_verify_non_existant_repo_commit_hash", "edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_verify_down_level_repo", "edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_verify_empty_repo_dir", "edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_verify_invalid_git_repo", "edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_verify_no_directory" ]
[ "edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_clean_clean_repo", "edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_clean_dir_but_not_git_repo", "edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_clean_dirty_git_repo", "edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_clean_no_directory", "edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_fetch_verify_good_repo_at_top_of_tree", "edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_verify_dirty_git_repo", "edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_verify_up_to_date", "edk2toolext/tests/test_git_dependency.py::TestGitDependencyUrlPatching::test_url_should_be_modified_if_creds_are_indicated_and_supplied", "edk2toolext/tests/test_git_dependency.py::TestGitDependencyUrlPatching::test_url_should_not_be_modified_without_descriptor_field", "edk2toolext/tests/test_git_dependency.py::TestGitDependencyUrlPatching::test_url_should_not_be_modified_without_env" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2019-11-22T18:06:56Z"
bsd-2-clause