Dataset schema (column, dtype, value statistics):

| column | dtype | stats |
| --- | --- | --- |
| instance_id | string | length 10-57 |
| patch | string | length 261-37.7k |
| repo | string | length 7-53 |
| base_commit | string | length 40-40 |
| hints_text | string | 301 distinct values |
| test_patch | string | length 212-2.22M |
| problem_statement | string | length 23-37.7k |
| version | int64 | 0-0 |
| environment_setup_commit | string | 89 distinct values |
| FAIL_TO_PASS | sequence | length 1-4.94k |
| PASS_TO_PASS | sequence | length 0-7.82k |
| meta | dict | |
| created_at | unknown | |
| license | string | 8 distinct values |

---

instance_id: tobymao__sqlglot-2658
patch:
```diff
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index ef431119..ef3dc237 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -587,6 +587,7 @@ class Tokenizer(metaclass=_Tokenizer):
             # Ensures we don't count an extra line if we get a \r\n line break sequence
             if self._char == "\r" and self._peek == "\n":
                 i = 2
+                self._start += 1
 
             self._col = 1
             self._line += 1
diff --git a/sqlglotrs/src/tokenizer.rs b/sqlglotrs/src/tokenizer.rs
index eb800f09..f386ce63 100644
--- a/sqlglotrs/src/tokenizer.rs
+++ b/sqlglotrs/src/tokenizer.rs
@@ -140,6 +140,7 @@ impl<'a> TokenizerState<'a> {
             // Ensures we don't count an extra line if we get a \r\n line break sequence.
             if self.current_char == '\r' && self.peek_char == '\n' {
                 i = 2;
+                self.start += 1;
             }
 
             self.column = 1;
```
repo: tobymao/sqlglot
base_commit: bf5f14673a7ec592ef33b3c56516d686e67b2fe7
test_patch:
```diff
diff --git a/tests/test_tokens.py b/tests/test_tokens.py
index b97f54a6..970c1ac2 100644
--- a/tests/test_tokens.py
+++ b/tests/test_tokens.py
@@ -71,6 +71,20 @@ x"""
         self.assertEqual(tokens[2].line, 2)
         self.assertEqual(tokens[3].line, 3)
 
+    def test_crlf(self):
+        tokens = Tokenizer().tokenize("SELECT a\r\nFROM b")
+        tokens = [(token.token_type, token.text) for token in tokens]
+
+        self.assertEqual(
+            tokens,
+            [
+                (TokenType.SELECT, "SELECT"),
+                (TokenType.VAR, "a"),
+                (TokenType.FROM, "FROM"),
+                (TokenType.VAR, "b"),
+            ],
+        )
+
     def test_command(self):
         tokens = Tokenizer().tokenize("SHOW;")
         self.assertEqual(tokens[0].token_type, TokenType.SHOW)
diff --git a/tests/test_transpile.py b/tests/test_transpile.py
index b732b459..fb8f8313 100644
--- a/tests/test_transpile.py
+++ b/tests/test_transpile.py
@@ -89,6 +89,7 @@ class TestTranspile(unittest.TestCase):
         self.validate("SELECT MIN(3)>=MIN(2)", "SELECT MIN(3) >= MIN(2)")
         self.validate("SELECT 1>0", "SELECT 1 > 0")
         self.validate("SELECT 3>=3", "SELECT 3 >= 3")
+        self.validate("SELECT a\r\nFROM b", "SELECT a FROM b")
 
     def test_comments(self):
         self.validate(
```
problem_statement:

\r\n sometimes causes a parsing error

Repro:

```python
from sqlglot import parse_one

parse_one('select a\r\nfrom b')
```

It appears that when the fix for line numbers went in, the advance function skips the \n but doesn't advance start, so the text becomes \nfrom, which doesn't match.
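A quick end-to-end check of the fix (a minimal sketch; it assumes a sqlglot build that includes the patch above, since affected versions raise a ParseError instead):

```python
from sqlglot import parse_one

# On a patched build, the \r\n between tokens is consumed cleanly and the
# statement round-trips to a single-line SQL string.
assert parse_one("select a\r\nfrom b").sql() == "SELECT a FROM b"
```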
version: 0
environment_setup_commit: 2401580b6f41fe72f1360493ee46e8a842bd04ba
FAIL_TO_PASS: [ "tests/test_tokens.py::TestTokens::test_crlf", "tests/test_transpile.py::TestTranspile::test_space" ]
PASS_TO_PASS:
[ "tests/test_tokens.py::TestTokens::test_command", "tests/test_tokens.py::TestTokens::test_comment_attachment", "tests/test_tokens.py::TestTokens::test_error_msg", "tests/test_tokens.py::TestTokens::test_jinja", "tests/test_tokens.py::TestTokens::test_space_keywords", "tests/test_tokens.py::TestTokens::test_token_line_col", "tests/test_transpile.py::TestTranspile::test_alias", "tests/test_transpile.py::TestTranspile::test_alter", "tests/test_transpile.py::TestTranspile::test_comments", "tests/test_transpile.py::TestTranspile::test_error_level", "tests/test_transpile.py::TestTranspile::test_extract", "tests/test_transpile.py::TestTranspile::test_identify_lambda", "tests/test_transpile.py::TestTranspile::test_identity", "tests/test_transpile.py::TestTranspile::test_if", "tests/test_transpile.py::TestTranspile::test_index_offset", "tests/test_transpile.py::TestTranspile::test_leading_comma", "tests/test_transpile.py::TestTranspile::test_normalize_name", "tests/test_transpile.py::TestTranspile::test_not_range", "tests/test_transpile.py::TestTranspile::test_paren", "tests/test_transpile.py::TestTranspile::test_partial", "tests/test_transpile.py::TestTranspile::test_pretty", "tests/test_transpile.py::TestTranspile::test_pretty_line_breaks", "tests/test_transpile.py::TestTranspile::test_some", "tests/test_transpile.py::TestTranspile::test_time", "tests/test_transpile.py::TestTranspile::test_types", "tests/test_transpile.py::TestTranspile::test_unary", "tests/test_transpile.py::TestTranspile::test_unsupported_level", "tests/test_transpile.py::TestTranspile::test_weird_chars", "tests/test_transpile.py::TestTranspile::test_with" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2023-12-12T16:49:30Z"
mit

---

instance_id: tobymao__sqlglot-2659
patch:
```diff
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index d981ffd2..41afad80 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -84,6 +84,20 @@ def _parse_date_diff(args: t.List) -> exp.Expression:
     return exp.DateDiff(this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0))
 
 
+def _parse_make_timestamp(args: t.List) -> exp.Expression:
+    if len(args) == 1:
+        return exp.UnixToTime(this=seq_get(args, 0), scale=exp.UnixToTime.MICROS)
+
+    return exp.TimestampFromParts(
+        year=seq_get(args, 0),
+        month=seq_get(args, 1),
+        day=seq_get(args, 2),
+        hour=seq_get(args, 3),
+        min=seq_get(args, 4),
+        sec=seq_get(args, 5),
+    )
+
+
 def _struct_sql(self: DuckDB.Generator, expression: exp.Struct) -> str:
     args: t.List[str] = []
     for expr in expression.expressions:
@@ -199,9 +213,7 @@ class DuckDB(Dialect):
             "LIST_REVERSE_SORT": _sort_array_reverse,
             "LIST_SORT": exp.SortArray.from_arg_list,
             "LIST_VALUE": exp.Array.from_arg_list,
-            "MAKE_TIMESTAMP": lambda args: exp.UnixToTime(
-                this=seq_get(args, 0), scale=exp.UnixToTime.MICROS
-            ),
+            "MAKE_TIMESTAMP": _parse_make_timestamp,
             "MEDIAN": lambda args: exp.PercentileCont(
                 this=seq_get(args, 0), expression=exp.Literal.number(0.5)
             ),
@@ -349,6 +361,7 @@ class DuckDB(Dialect):
             exp.StrToUnix: lambda self, e: f"EPOCH(STRPTIME({self.sql(e, 'this')}, {self.format_time(e)}))",
             exp.Struct: _struct_sql,
             exp.Timestamp: no_timestamp_sql,
+            exp.TimestampFromParts: rename_func("MAKE_TIMESTAMP"),
             exp.TimestampTrunc: timestamptrunc_sql,
             exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
             exp.TimeStrToTime: timestrtotime_sql,
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 99722be1..19a96df2 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -5233,6 +5233,19 @@ class UnixToTimeStr(Func):
     pass
 
 
+class TimestampFromParts(Func):
+    """Constructs a timestamp given its constituent parts."""
+
+    arg_types = {
+        "year": True,
+        "month": True,
+        "day": True,
+        "hour": True,
+        "min": True,
+        "sec": True,
+    }
+
+
 class Upper(Func):
     _sql_names = ["UPPER", "UCASE"]
```
repo: tobymao/sqlglot
base_commit: 238300381b53c232a7ad0fd9e5b8b2ceaf563f08
test_patch:
```diff
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 412f5c49..f9151681 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -116,6 +116,8 @@ class TestDuckDB(Validator):
             parse_one("a // b", read="duckdb").assert_is(exp.IntDiv).sql(dialect="duckdb"), "a // b"
         )
 
+        self.validate_identity("MAKE_TIMESTAMP(1992, 9, 20, 13, 34, 27.123456)")
+        self.validate_identity("MAKE_TIMESTAMP(1667810584123456)")
         self.validate_identity("SELECT EPOCH_MS(10) AS t")
         self.validate_identity("SELECT MAKE_TIMESTAMP(10) AS t")
         self.validate_identity("SELECT TO_TIMESTAMP(10) AS t")
```
problem_statement:

`make_timestamp` parse for duckdb is incorrect and also doesn't accept the correct number of arguments (6)

**Fully reproducible code snippet**

Not sure what's happening here, but all the inputs are being thrown away:

```
In [6]: sg.parse_one("select make_timestamp(1,2,3,4,5)", read="duckdb")
Out[6]: (SELECT expressions: (UNIXTOTIME this: (LITERAL this: 1, is_string: False), scale: (LITERAL this: micros, is_string: True)))

In [8]: sg.parse_one("select make_timestamp(1,2,3,4,5)", read="duckdb").sql('duckdb')
Out[8]: 'SELECT MAKE_TIMESTAMP(1)'
```

Second, this particular overload of DuckDB's `make_timestamp` accepts 6 arguments per the documentation:

![image](https://github.com/tobymao/sqlglot/assets/417981/03a34f38-156c-471b-ab9b-ba8172edd4dd)

```
In [9]: sg.parse_one("select make_timestamp(1,2,3,4,5,6)", read="duckdb")
...
ParseError: The number of provided arguments (6) is greater than the maximum number of supported arguments (5). Line 1, Col: 34.
  select make_timestamp(1,2,3,4,5,6)
```

**Official Documentation**
https://duckdb.org/docs/sql/functions/timestamp#timestamp-functions
https://duckdb.org/docs/sql/functions/timestamptz#icu-timestamp-with-time-zone-functions
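For illustration, both overloads round-trip after the fix (a sketch, assuming a sqlglot version that includes `exp.TimestampFromParts`; the expected outputs are taken from the identities in the test patch above):

```python
import sqlglot

# 6-argument overload: now preserved instead of collapsing to MAKE_TIMESTAMP(1).
six = sqlglot.parse_one("SELECT MAKE_TIMESTAMP(1992, 9, 20, 13, 34, 27.123456)", read="duckdb")
print(six.sql(dialect="duckdb"))  # SELECT MAKE_TIMESTAMP(1992, 9, 20, 13, 34, 27.123456)

# 1-argument overload: still parsed as an epoch-microseconds conversion.
one = sqlglot.parse_one("SELECT MAKE_TIMESTAMP(1667810584123456)", read="duckdb")
print(one.sql(dialect="duckdb"))  # SELECT MAKE_TIMESTAMP(1667810584123456)
```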
version: 0
environment_setup_commit: 2401580b6f41fe72f1360493ee46e8a842bd04ba
FAIL_TO_PASS: [ "tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb" ]
PASS_TO_PASS:
[ "tests/dialects/test_duckdb.py::TestDuckDB::test_array", "tests/dialects/test_duckdb.py::TestDuckDB::test_array_index", "tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or", "tests/dialects/test_duckdb.py::TestDuckDB::test_cast", "tests/dialects/test_duckdb.py::TestDuckDB::test_encode_decode", "tests/dialects/test_duckdb.py::TestDuckDB::test_isinf", "tests/dialects/test_duckdb.py::TestDuckDB::test_isnan", "tests/dialects/test_duckdb.py::TestDuckDB::test_rename_table", "tests/dialects/test_duckdb.py::TestDuckDB::test_sample", "tests/dialects/test_duckdb.py::TestDuckDB::test_time", "tests/dialects/test_duckdb.py::TestDuckDB::test_timestamps_with_units" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-12-12T17:57:08Z"
mit

---

instance_id: tobymao__sqlglot-2674
patch:
```diff
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 42e8c661..fca42d48 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -369,12 +369,35 @@ class Snowflake(Dialect):
 
             return lateral
 
+        def _parse_at_before(self, table: exp.Table) -> exp.Table:
+            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
+            index = self._index
+            if self._match_texts(("AT", "BEFORE")):
+                this = self._prev.text.upper()
+                kind = (
+                    self._match(TokenType.L_PAREN)
+                    and self._match_texts(self.HISTORICAL_DATA_KIND)
+                    and self._prev.text.upper()
+                )
+                expression = self._match(TokenType.FARROW) and self._parse_bitwise()
+
+                if expression:
+                    self._match_r_paren()
+                    when = self.expression(
+                        exp.HistoricalData, this=this, kind=kind, expression=expression
+                    )
+                    table.set("when", when)
+                else:
+                    self._retreat(index)
+
+            return table
+
         def _parse_table_parts(self, schema: bool = False) -> exp.Table:
             # https://docs.snowflake.com/en/user-guide/querying-stage
-            if self._match_text_seq("@", advance=False):
-                table: t.Optional[exp.Expression] = self._parse_location_path()
-            elif self._match(TokenType.STRING, advance=False):
+            if self._match(TokenType.STRING, advance=False):
                 table = self._parse_string()
+            elif self._match_text_seq("@", advance=False):
+                table = self._parse_location_path()
             else:
                 table = None
 
@@ -393,9 +416,11 @@ class Snowflake(Dialect):
 
                 self._match(TokenType.COMMA)
 
-            return self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
+                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
+            else:
+                table = super()._parse_table_parts(schema=schema)
 
-            return super()._parse_table_parts(schema=schema)
+            return self._parse_at_before(table)
 
         def _parse_id_var(
             self,
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 19a96df2..6990344e 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1105,14 +1105,7 @@ class Create(DDL):
 # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_clone_statement
 # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_copy
 class Clone(Expression):
-    arg_types = {
-        "this": True,
-        "when": False,
-        "kind": False,
-        "shallow": False,
-        "expression": False,
-        "copy": False,
-    }
+    arg_types = {"this": True, "shallow": False, "copy": False}
 
 
 class Describe(Expression):
@@ -2522,6 +2515,11 @@ class IndexTableHint(Expression):
     arg_types = {"this": True, "expressions": False, "target": False}
 
 
+# https://docs.snowflake.com/en/sql-reference/constructs/at-before
+class HistoricalData(Expression):
+    arg_types = {"this": True, "kind": True, "expression": True}
+
+
 class Table(Expression):
     arg_types = {
         "this": True,
@@ -2538,6 +2536,7 @@ class Table(Expression):
         "pattern": False,
         "index": False,
         "ordinality": False,
+        "when": False,
     }
 
     @property
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index f3f90601..e03462d1 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -862,15 +862,7 @@ class Generator:
         this = self.sql(expression, "this")
         shallow = "SHALLOW " if expression.args.get("shallow") else ""
         keyword = "COPY" if expression.args.get("copy") and self.SUPPORTS_TABLE_COPY else "CLONE"
-        this = f"{shallow}{keyword} {this}"
-
-        when = self.sql(expression, "when")
-
-        if when:
-            kind = self.sql(expression, "kind")
-            expr = self.sql(expression, "expression")
-            return f"{this} {when} ({kind} => {expr})"
-
-        return this
+        return f"{shallow}{keyword} {this}"
 
     def describe_sql(self, expression: exp.Describe) -> str:
         return f"DESCRIBE {self.sql(expression, 'this')}"
@@ -1400,6 +1392,12 @@ class Generator:
         target = f" FOR {target}" if target else ""
         return f"{this}{target} ({self.expressions(expression, flat=True)})"
 
+    def historicaldata_sql(self, expression: exp.HistoricalData) -> str:
+        this = self.sql(expression, "this")
+        kind = self.sql(expression, "kind")
+        expr = self.sql(expression, "expression")
+        return f"{this} ({kind} => {expr})"
+
     def table_sql(self, expression: exp.Table, sep: str = " AS ") -> str:
         table = ".".join(
             self.sql(part)
@@ -1436,6 +1434,10 @@ class Generator:
             ordinality = f" WITH ORDINALITY{alias}"
             alias = ""
 
+        when = self.sql(expression, "when")
+        if when:
+            table = f"{table} {when}"
+
         return f"{table}{version}{file_format}{alias}{index}{hints}{pivots}{joins}{laterals}{ordinality}"
 
     def tablesample_sql(
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 1fae5959..5399b293 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -907,7 +907,7 @@ class Parser(metaclass=_Parser):
     INSERT_ALTERNATIVES = {"ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"}
 
     CLONE_KEYWORDS = {"CLONE", "COPY"}
-    CLONE_KINDS = {"TIMESTAMP", "OFFSET", "STATEMENT"}
+    HISTORICAL_DATA_KIND = {"TIMESTAMP", "OFFSET", "STATEMENT", "STREAM"}
 
     OPCLASS_FOLLOW_KEYWORDS = {"ASC", "DESC", "NULLS"}
     OPTYPE_FOLLOW_TOKENS = {TokenType.COMMA, TokenType.R_PAREN}
@@ -1411,23 +1411,8 @@ class Parser(metaclass=_Parser):
 
         if self._match_texts(self.CLONE_KEYWORDS):
             copy = self._prev.text.lower() == "copy"
-            clone = self._parse_table(schema=True)
-
-            when = self._match_texts(("AT", "BEFORE")) and self._prev.text.upper()
-            clone_kind = (
-                self._match(TokenType.L_PAREN)
-                and self._match_texts(self.CLONE_KINDS)
-                and self._prev.text.upper()
-            )
-            clone_expression = self._match(TokenType.FARROW) and self._parse_bitwise()
-            self._match(TokenType.R_PAREN)
-
             clone = self.expression(
-                exp.Clone,
-                this=clone,
-                when=when,
-                kind=clone_kind,
-                shallow=shallow,
-                expression=clone_expression,
-                copy=copy,
+                exp.Clone, this=self._parse_table(schema=True), shallow=shallow, copy=copy
             )
 
         return self.expression(
```
repo: tobymao/sqlglot
base_commit: 2ae0debec0b945b0ece250d8e1e29b072b05602a
test_patch:
```diff
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 29931323..13f32c13 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -72,6 +72,18 @@ WHERE
         self.validate_identity(
             'DESCRIBE TABLE "SNOWFLAKE_SAMPLE_DATA"."TPCDS_SF100TCL"."WEB_SITE" type=stage'
         )
+        self.validate_identity(
+            "SELECT * FROM foo at",
+            "SELECT * FROM foo AS at",
+        )
+        self.validate_identity(
+            "SELECT * FROM foo before",
+            "SELECT * FROM foo AS before",
+        )
+        self.validate_identity(
+            "SELECT * FROM foo at (col)",
+            "SELECT * FROM foo AS at(col)",
+        )
         self.validate_identity(
             "SELECT * FROM unnest(x) with ordinality",
             "SELECT * FROM TABLE(FLATTEN(INPUT => x)) AS _u(seq, key, path, index, value, this)",
@@ -779,6 +791,53 @@ WHERE
             },
         )
 
+    def test_historical_data(self):
+        self.validate_identity("SELECT * FROM my_table AT (STATEMENT => $query_id_var)")
+        self.validate_identity("SELECT * FROM my_table AT (OFFSET => -60 * 5)")
+        self.validate_identity("SELECT * FROM my_table BEFORE (STATEMENT => $query_id_var)")
+        self.validate_identity("SELECT * FROM my_table BEFORE (OFFSET => -60 * 5)")
+        self.validate_identity("CREATE SCHEMA restored_schema CLONE my_schema AT (OFFSET => -3600)")
+        self.validate_identity(
+            "CREATE TABLE restored_table CLONE my_table AT (TIMESTAMP => CAST('Sat, 09 May 2015 01:01:00 +0300' AS TIMESTAMPTZ))",
+        )
+        self.validate_identity(
+            "CREATE DATABASE restored_db CLONE my_db BEFORE (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726')"
+        )
+        self.validate_identity(
+            "SELECT * FROM my_table AT (TIMESTAMP => TO_TIMESTAMP(1432669154242, 3))"
+        )
+        self.validate_identity(
+            "SELECT * FROM my_table AT (OFFSET => -60 * 5) AS T WHERE T.flag = 'valid'"
+        )
+        self.validate_identity(
+            "SELECT * FROM my_table AT (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726')"
+        )
+        self.validate_identity(
+            "SELECT * FROM my_table BEFORE (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726')"
+        )
+        self.validate_identity(
+            "SELECT * FROM my_table AT (TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp)",
+            "SELECT * FROM my_table AT (TIMESTAMP => CAST('Fri, 01 May 2015 16:20:00 -0700' AS TIMESTAMPNTZ))",
+        )
+        self.validate_identity(
+            "SELECT * FROM my_table AT(TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp_tz)",
+            "SELECT * FROM my_table AT (TIMESTAMP => CAST('Fri, 01 May 2015 16:20:00 -0700' AS TIMESTAMPTZ))",
+        )
+        self.validate_identity(
+            "SELECT * FROM my_table BEFORE (TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp_tz);",
+            "SELECT * FROM my_table BEFORE (TIMESTAMP => CAST('Fri, 01 May 2015 16:20:00 -0700' AS TIMESTAMPTZ))",
+        )
+        self.validate_identity(
+            """
+            SELECT oldt.* , newt.*
+            FROM my_table BEFORE(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS oldt
+            FULL OUTER JOIN my_table AT(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS newt
+            ON oldt.id = newt.id
+            WHERE oldt.id IS NULL OR newt.id IS NULL;
+            """,
+            "SELECT oldt.*, newt.* FROM my_table BEFORE (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS oldt FULL OUTER JOIN my_table AT (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS newt ON oldt.id = newt.id WHERE oldt.id IS NULL OR newt.id IS NULL",
+        )
+
     def test_ddl(self):
         self.validate_identity(
             """create external table et2(
```
problem_statement:

Snowflake SELECT ... AT/BEFORE syntax (time travel) does not parse in 20.1.0

In 20.1.0, the SELECT AT/BEFORE (time travel) examples from the documentation raise a ParseError. The CREATE TABLE/SCHEMA/DATABASE examples transpile successfully.

Documentation/references:
* https://docs.snowflake.com/en/user-guide/data-time-travel#querying-historical-data
* https://docs.snowflake.com/en/sql-reference/constructs/at-before
* Issue for time travel for other databases: https://github.com/tobymao/sqlglot/issues/2128

```python
examples = [
    "SELECT * FROM my_table AT(TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp);",
    "SELECT * FROM my_table AT(TIMESTAMP => TO_TIMESTAMP(1432669154242, 3));",
    "SELECT * FROM my_table AT(OFFSET => -60*5) AS T WHERE T.flag = 'valid';",
    "SELECT * FROM my_table AT (STATEMENT=>$query_id_var);",
    "SELECT * FROM my_table AT(TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp_tz);",
    "SELECT * FROM my_table AT (OFFSET => -60*5);",
    "SELECT * FROM my_table AT(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726');",
    "SELECT * FROM my_table BEFORE(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726');",
    "SELECT * FROM my_table BEFORE(STATEMENT=>$query_id_var);",
    "SELECT * FROM my_table BEFORE (TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp_tz);",
    "SELECT * FROM my_table BEFORE(OFFSET => -60*5);",
    "SELECT * FROM my_table BEFORE (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726');",
    """
    SELECT oldt.* , newt.*
    FROM my_table BEFORE(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS oldt
    FULL OUTER JOIN my_table AT(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS newt
    ON oldt.id = newt.id
    WHERE oldt.id IS NULL OR newt.id IS NULL;
    """,
    # these work today:
    "CREATE TABLE restored_table CLONE my_table AT(TIMESTAMP => 'Sat, 09 May 2015 01:01:00 +0300'::timestamp_tz);",
    "CREATE SCHEMA restored_schema CLONE my_schema AT(OFFSET => -3600);",
    "CREATE DATABASE restored_db CLONE my_db BEFORE(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726');",
]

import sqlglot
import sqlglot.errors

print(sqlglot.__version__)
for i, s in enumerate(examples):
    try:
        t = sqlglot.transpile(
            s.strip(), read="snowflake", write="snowflake", pretty=True
        )
        print(i, "okay", t)
    except sqlglot.errors.ParseError as e:
        print(i, "error", e)
```

Output (expected is all are okay):

```
20.1.0
0 error Expecting ). Line 1, Col: 38. SELECT * FROM my_table AT(TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp);
1 error Expecting ). Line 1, Col: 38. SELECT * FROM my_table AT(TIMESTAMP => TO_TIMESTAMP(1432669154242, 3));
2 error Expecting ). Line 1, Col: 35. SELECT * FROM my_table AT(OFFSET => -60*5) AS T WHERE T.flag = 'valid';
3 error Expecting ). Line 1, Col: 38. SELECT * FROM my_table AT (STATEMENT=>$query_id_var);
4 error Expecting ). Line 1, Col: 38. SELECT * FROM my_table AT(TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp_tz);
5 error Expecting ). Line 1, Col: 36. SELECT * FROM my_table AT (OFFSET => -60*5);
6 error Expecting ). Line 1, Col: 38. SELECT * FROM my_table AT(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726');
7 error Expecting ). Line 1, Col: 42. SELECT * FROM my_table BEFORE(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726');
8 error Expecting ). Line 1, Col: 41. SELECT * FROM my_table BEFORE(STATEMENT=>$query_id_var);
9 error Expecting ). Line 1, Col: 43. SELECT * FROM my_table BEFORE (TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp_tz);
10 error Expecting ). Line 1, Col: 39. SELECT * FROM my_table BEFORE(OFFSET => -60*5);
11 error Expecting ). Line 1, Col: 43. SELECT * FROM my_table BEFORE (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726');
12 error Expecting ). Line 2, Col: 34. SELECT oldt.* , newt.* FROM my_table BEFORE(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS oldt FULL OUTER JOIN my_table AT(STATEMENT => '8e5d0
13 okay ["CREATE TABLE restored_table CLONE my_table AT (TIMESTAMP => CAST('Sat, 09 May 2015 01:01:00 +0300' AS TIMESTAMPTZ))"]
14 okay ['CREATE SCHEMA restored_schema CLONE my_schema AT (OFFSET => -3600)']
15 okay ["CREATE DATABASE restored_db CLONE my_db BEFORE (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726')"]
```
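A quick check of the now-supported syntax (a sketch; `sqlglot.transpile` is the same public API the issue script uses, and the expected round-trip is one of the identities in the test patch above):

```python
import sqlglot

# With the _parse_at_before change, SELECT-level time travel parses and round-trips.
sql = "SELECT * FROM my_table AT (OFFSET => -60 * 5) AS T WHERE T.flag = 'valid'"
print(sqlglot.transpile(sql, read="snowflake", write="snowflake")[0])
# SELECT * FROM my_table AT (OFFSET => -60 * 5) AS T WHERE T.flag = 'valid'
```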
version: 0
environment_setup_commit: 2401580b6f41fe72f1360493ee46e8a842bd04ba
FAIL_TO_PASS: [ "tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data" ]
PASS_TO_PASS:
[ "tests/dialects/test_snowflake.py::TestSnowflake::test_ddl", "tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table", "tests/dialects/test_snowflake.py::TestSnowflake::test_flatten", "tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize", "tests/dialects/test_snowflake.py::TestSnowflake::test_minus", "tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment", "tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any", "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace", "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr", "tests/dialects/test_snowflake.py::TestSnowflake::test_sample", "tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types", "tests/dialects/test_snowflake.py::TestSnowflake::test_show", "tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake", "tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files", "tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures", "tests/dialects/test_snowflake.py::TestSnowflake::test_swap", "tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal", "tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps", "tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast", "tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions", "tests/dialects/test_snowflake.py::TestSnowflake::test_values" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-12-14T01:41:07Z"
mit

---

instance_id: tobymao__sqlglot-2700
patch:
```diff
diff --git a/sqlglot/dialects/dialect.py b/sqlglot/dialects/dialect.py
index e9aa45db..b7eef451 100644
--- a/sqlglot/dialects/dialect.py
+++ b/sqlglot/dialects/dialect.py
@@ -126,6 +126,7 @@ class _Dialect(type):
         klass.BIT_START, klass.BIT_END = get_start_end(TokenType.BIT_STRING)
         klass.HEX_START, klass.HEX_END = get_start_end(TokenType.HEX_STRING)
         klass.BYTE_START, klass.BYTE_END = get_start_end(TokenType.BYTE_STRING)
+        klass.UNICODE_START, klass.UNICODE_END = get_start_end(TokenType.UNICODE_STRING)
 
         if enum not in ("", "bigquery"):
             klass.generator_class.SELECT_KINDS = ()
@@ -240,13 +241,15 @@ class Dialect(metaclass=_Dialect):
     IDENTIFIER_START = '"'
     IDENTIFIER_END = '"'
 
-    # Delimiters for bit, hex and byte literals
+    # Delimiters for bit, hex, byte and unicode literals
     BIT_START: t.Optional[str] = None
     BIT_END: t.Optional[str] = None
     HEX_START: t.Optional[str] = None
    HEX_END: t.Optional[str] = None
     BYTE_START: t.Optional[str] = None
     BYTE_END: t.Optional[str] = None
+    UNICODE_START: t.Optional[str] = None
+    UNICODE_END: t.Optional[str] = None
 
     @classmethod
     def get_or_raise(cls, dialect: DialectType) -> Dialect:
diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py
index 88f4f539..5e6d444d 100644
--- a/sqlglot/dialects/presto.py
+++ b/sqlglot/dialects/presto.py
@@ -222,6 +222,12 @@ class Presto(Dialect):
     NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE
 
     class Tokenizer(tokens.Tokenizer):
+        UNICODE_STRINGS = [
+            (prefix + q, q)
+            for q in t.cast(t.List[str], tokens.Tokenizer.QUOTES)
+            for prefix in ("U&", "u&")
+        ]
+
         KEYWORDS = {
             **tokens.Tokenizer.KEYWORDS,
             "START": TokenType.BEGIN,
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index fca42d48..36bbcc50 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -33,6 +33,21 @@ def _check_int(s: str) -> bool:
     return s.isdigit()
 
 
+def _parse_to_array(args: t.List) -> exp.Expression:
+    arg = seq_get(args, 0)
+    if isinstance(arg, exp.Expression):
+        from sqlglot.optimizer.annotate_types import annotate_types
+
+        # https://docs.snowflake.com/en/sql-reference/functions/to_array
+        arg = annotate_types(arg)
+        if arg.is_type(exp.DataType.Type.ARRAY):
+            return arg
+        if arg.is_type(exp.DataType.Type.VARIANT):
+            return exp.Anonymous(this="TO_ARRAY", expressions=[arg])
+
+    return exp.Array.from_arg_list(args)
+
+
 # from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
 def _parse_to_timestamp(args: t.List) -> t.Union[exp.StrToTime, exp.UnixToTime, exp.TimeStrToTime]:
     if len(args) == 2:
@@ -293,7 +308,7 @@ class Snowflake(Dialect):
             "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
             "TIMEDIFF": _parse_datediff,
             "TIMESTAMPDIFF": _parse_datediff,
-            "TO_ARRAY": exp.Array.from_arg_list,
+            "TO_ARRAY": _parse_to_array,
             "TO_TIMESTAMP": _parse_to_timestamp,
             "TO_VARCHAR": exp.ToChar.from_arg_list,
             "ZEROIFNULL": _zeroifnull_to_if,
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 6990344e..e2839576 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1206,6 +1206,10 @@ class RawString(Condition):
     pass
 
 
+class UnicodeString(Condition):
+    arg_types = {"this": True, "escape": False}
+
+
 class Column(Condition):
     arg_types = {"this": True, "table": False, "db": False, "catalog": False, "join_mark": False}
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 665538eb..add02d06 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -915,6 +915,14 @@ class Generator:
             return f"{self.dialect.BYTE_START}{this}{self.dialect.BYTE_END}"
         return this
 
+    def unicodestring_sql(self, expression: exp.UnicodeString) -> str:
+        this = self.sql(expression, "this")
+        if self.dialect.UNICODE_START:
+            escape = self.sql(expression, "escape")
+            escape = f" UESCAPE {escape}" if escape else ""
+            return f"{self.dialect.UNICODE_START}{this}{self.dialect.UNICODE_END}{escape}"
+        return this
+
     def rawstring_sql(self, expression: exp.RawString) -> str:
         string = self.escape_str(expression.this.replace("\\", "\\\\"))
         return f"{self.dialect.QUOTE_START}{string}{self.dialect.QUOTE_END}"
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index bee2cff8..c4062e1d 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -635,6 +635,11 @@ class Parser(metaclass=_Parser):
         TokenType.HEREDOC_STRING: lambda self, token: self.expression(
             exp.RawString, this=token.text
         ),
+        TokenType.UNICODE_STRING: lambda self, token: self.expression(
+            exp.UnicodeString,
+            this=token.text,
+            escape=self._match_text_seq("UESCAPE") and self._parse_string(),
+        ),
         TokenType.SESSION_PARAMETER: lambda self, _: self._parse_session_parameter(),
     }
@@ -3599,7 +3604,7 @@ class Parser(metaclass=_Parser):
                 exp.DataType, this=exp.DataType.Type.INTERVAL, expressions=span
             )
         else:
-            this = self.expression(exp.Interval, unit=unit)
+            this = self.expression(exp.DataType, this=self.expression(exp.Interval, unit=unit))
 
         if maybe_func and check_func:
             index2 = self._index
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index aaeafb1c..de9d4c4a 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -97,6 +97,7 @@ class TokenType(AutoName):
     NATIONAL_STRING = auto()
     RAW_STRING = auto()
     HEREDOC_STRING = auto()
+    UNICODE_STRING = auto()
 
     # types
     BIT = auto()
@@ -450,6 +451,7 @@ class _Tokenizer(type):
             **_quotes_to_format(TokenType.HEX_STRING, klass.HEX_STRINGS),
             **_quotes_to_format(TokenType.RAW_STRING, klass.RAW_STRINGS),
             **_quotes_to_format(TokenType.HEREDOC_STRING, klass.HEREDOC_STRINGS),
+            **_quotes_to_format(TokenType.UNICODE_STRING, klass.UNICODE_STRINGS),
         }
 
         klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
@@ -557,6 +559,7 @@ class Tokenizer(metaclass=_Tokenizer):
     HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
     RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
     HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
+    UNICODE_STRINGS: t.List[str | t.Tuple[str, str]] = []
     IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
     IDENTIFIER_ESCAPES = ['"']
     QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
```
repo: tobymao/sqlglot
base_commit: 0d6831aecfee75dcb4ed74dab37b7dd2b304c6f6
test_patch:
```diff
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 0332ae1a..91556960 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -8,6 +8,11 @@ class TestPostgres(Validator):
     dialect = "postgres"
 
     def test_ddl(self):
+        expr = parse_one("CREATE TABLE t (x INTERVAL day)", read="postgres")
+        cdef = expr.find(exp.ColumnDef)
+        cdef.args["kind"].assert_is(exp.DataType)
+        self.assertEqual(expr.sql(dialect="postgres"), "CREATE TABLE t (x INTERVAL day)")
+
         self.validate_identity("CREATE INDEX idx_x ON x USING BTREE(x, y) WHERE (NOT y IS NULL)")
         self.validate_identity("CREATE TABLE test (elems JSONB[])")
         self.validate_identity("CREATE TABLE public.y (x TSTZRANGE NOT NULL)")
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py
index ad85ddc6..a9d88cb0 100644
--- a/tests/dialects/test_presto.py
+++ b/tests/dialects/test_presto.py
@@ -544,26 +544,18 @@ class TestPresto(Validator):
             },
         )
 
-    def test_presto(self):
-        self.validate_identity("string_agg(x, ',')", "ARRAY_JOIN(ARRAY_AGG(x), ',')")
-        self.validate_identity(
-            "SELECT * FROM example.testdb.customer_orders FOR VERSION AS OF 8954597067493422955"
-        )
-        self.validate_identity(
-            "SELECT * FROM example.testdb.customer_orders FOR TIMESTAMP AS OF CAST('2022-03-23 09:59:29.803 Europe/Vienna' AS TIMESTAMP)"
-        )
-
-        self.validate_identity("SELECT * FROM x OFFSET 1 LIMIT 1")
-        self.validate_identity("SELECT * FROM x OFFSET 1 FETCH FIRST 1 ROWS ONLY")
-        self.validate_identity("SELECT BOOL_OR(a > 10) FROM asd AS T(a)")
-        self.validate_identity("SELECT * FROM (VALUES (1))")
-        self.validate_identity("START TRANSACTION READ WRITE, ISOLATION LEVEL SERIALIZABLE")
-        self.validate_identity("START TRANSACTION ISOLATION LEVEL REPEATABLE READ")
-        self.validate_identity("APPROX_PERCENTILE(a, b, c, d)")
-        self.validate_identity(
-            "SELECT SPLIT_TO_MAP('a:1;b:2;a:3', ';', ':', (k, v1, v2) -> CONCAT(v1, v2))"
-        )
+    def test_unicode_string(self):
+        for prefix in ("u&", "U&"):
+            self.validate_identity(
+                f"{prefix}'Hello winter \\2603 !'",
+                "U&'Hello winter \\2603 !'",
+            )
+            self.validate_identity(
+                f"{prefix}'Hello winter #2603 !' UESCAPE '#'",
+                "U&'Hello winter #2603 !' UESCAPE '#'",
+            )
 
+    def test_presto(self):
         with self.assertLogs(helper_logger) as cm:
             self.validate_all(
                 "SELECT COALESCE(ELEMENT_AT(MAP_FROM_ENTRIES(ARRAY[(51, '1')]), id), quantity) FROM my_table",
@@ -582,6 +574,24 @@ class TestPresto(Validator):
                 },
             )
 
+        self.validate_identity("string_agg(x, ',')", "ARRAY_JOIN(ARRAY_AGG(x), ',')")
+        self.validate_identity("SELECT * FROM x OFFSET 1 LIMIT 1")
+        self.validate_identity("SELECT * FROM x OFFSET 1 FETCH FIRST 1 ROWS ONLY")
+        self.validate_identity("SELECT BOOL_OR(a > 10) FROM asd AS T(a)")
+        self.validate_identity("SELECT * FROM (VALUES (1))")
+        self.validate_identity("START TRANSACTION READ WRITE, ISOLATION LEVEL SERIALIZABLE")
+        self.validate_identity("START TRANSACTION ISOLATION LEVEL REPEATABLE READ")
+        self.validate_identity("APPROX_PERCENTILE(a, b, c, d)")
+        self.validate_identity(
+            "SELECT SPLIT_TO_MAP('a:1;b:2;a:3', ';', ':', (k, v1, v2) -> CONCAT(v1, v2))"
+        )
+        self.validate_identity(
+            "SELECT * FROM example.testdb.customer_orders FOR VERSION AS OF 8954597067493422955"
+        )
+        self.validate_identity(
+            "SELECT * FROM example.testdb.customer_orders FOR TIMESTAMP AS OF CAST('2022-03-23 09:59:29.803 Europe/Vienna' AS TIMESTAMP)"
+        )
+
         self.validate_all(
             "SELECT MAX_BY(a.id, a.timestamp) FROM a",
             read={
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 13f32c13..4c2d7ca7 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -127,11 +127,45 @@ WHERE
             "SELECT TO_TIMESTAMP(x) FROM t",
             "SELECT CAST(x AS TIMESTAMPNTZ) FROM t",
         )
+        self.validate_identity(
+            "CAST(x AS BYTEINT)",
+            "CAST(x AS INT)",
+        )
+        self.validate_identity(
+            "CAST(x AS CHAR VARYING)",
+            "CAST(x AS VARCHAR)",
+        )
+        self.validate_identity(
+            "CAST(x AS CHARACTER VARYING)",
+            "CAST(x AS VARCHAR)",
+        )
+        self.validate_identity(
+            "CAST(x AS NCHAR VARYING)",
+            "CAST(x AS VARCHAR)",
+        )
+        self.validate_identity(
+            "SELECT TO_ARRAY(x::ARRAY)",
+            "SELECT CAST(x AS ARRAY)",
+        )
+        self.validate_identity(
+            "SELECT TO_ARRAY(['test']::VARIANT)",
+            "SELECT TO_ARRAY(CAST(['test'] AS VARIANT))",
+        )
 
-        self.validate_all("CAST(x AS BYTEINT)", write={"snowflake": "CAST(x AS INT)"})
-        self.validate_all("CAST(x AS CHAR VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"})
-        self.validate_all("CAST(x AS CHARACTER VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"})
-        self.validate_all("CAST(x AS NCHAR VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"})
+        self.validate_all(
+            "SELECT TO_ARRAY(['test'])",
+            write={
+                "snowflake": "SELECT ['test']",
+                "spark": "SELECT ARRAY('test')",
+            },
+        )
+        self.validate_all(
+            "SELECT TO_ARRAY(['test'])",
+            write={
+                "snowflake": "SELECT ['test']",
+                "spark": "SELECT ARRAY('test')",
+            },
+        )
         self.validate_all(
             # We need to qualify the columns in this query because "value" would be ambiguous
             'WITH t(x, "value") AS (SELECT [1, 2, 3], 1) SELECT IFF(_u.pos = _u_2.pos_2, _u_2."value", NULL) AS "value" FROM t, TABLE(FLATTEN(INPUT => ARRAY_GENERATE_RANGE(0, (GREATEST(ARRAY_SIZE(t.x)) - 1) + 1))) AS _u(seq, key, path, index, pos, this) CROSS JOIN TABLE(FLATTEN(INPUT => t.x)) AS _u_2(seq, key, path, pos_2, "value", this) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > (ARRAY_SIZE(t.x) - 1) AND _u_2.pos_2 = (ARRAY_SIZE(t.x) - 1))',
```
problem_statement:

Snowflake to_array implemented incorrectly

The docs for the snowflake implementation of [to_array](https://docs.snowflake.com/en/sql-reference/functions/to_array) state:

> If the input is an ARRAY, or VARIANT containing an array value, the result is unchanged.
> For NULL or a JSON null input, returns NULL.
> For any other value, the result is a single-element array containing this value.

As we can see below, when the input is an array, the result is an array of arrays, which is not unchanged.

```
>>> parse_one("select to_array(['test']) from test;", dialect="snowflake").sql(pretty=True)
"SELECT\n  ARRAY(ARRAY('test'))\nFROM test"
```

```
sqlglot==20.2.0
```
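A hedged check of the corrected behavior (assumes a build containing the `_parse_to_array` change; the expected outputs are lifted from the test patch above):

```python
from sqlglot import parse_one

# ARRAY input: TO_ARRAY is now a no-op instead of wrapping in another ARRAY.
print(parse_one("SELECT TO_ARRAY(['test'])", read="snowflake").sql(dialect="snowflake"))
# SELECT ['test']

# VARIANT input: preserved as an opaque TO_ARRAY call, matching the documented semantics.
print(parse_one("SELECT TO_ARRAY(['test']::VARIANT)", read="snowflake").sql(dialect="snowflake"))
# SELECT TO_ARRAY(CAST(['test'] AS VARIANT))
```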
version: 0
environment_setup_commit: 2401580b6f41fe72f1360493ee46e8a842bd04ba
FAIL_TO_PASS: [ "tests/dialects/test_postgres.py::TestPostgres::test_ddl", "tests/dialects/test_presto.py::TestPresto::test_unicode_string", "tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake" ]
PASS_TO_PASS:
[ "tests/dialects/test_postgres.py::TestPostgres::test_array_offset", "tests/dialects/test_postgres.py::TestPostgres::test_bool_or", "tests/dialects/test_postgres.py::TestPostgres::test_operator", "tests/dialects/test_postgres.py::TestPostgres::test_postgres", "tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary", "tests/dialects/test_postgres.py::TestPostgres::test_string_concat", "tests/dialects/test_postgres.py::TestPostgres::test_unnest", "tests/dialects/test_postgres.py::TestPostgres::test_variance", "tests/dialects/test_presto.py::TestPresto::test_cast", "tests/dialects/test_presto.py::TestPresto::test_ddl", "tests/dialects/test_presto.py::TestPresto::test_encode_decode", "tests/dialects/test_presto.py::TestPresto::test_hex_unhex", "tests/dialects/test_presto.py::TestPresto::test_interval_plural_to_singular", "tests/dialects/test_presto.py::TestPresto::test_json", "tests/dialects/test_presto.py::TestPresto::test_match_recognize", "tests/dialects/test_presto.py::TestPresto::test_presto", "tests/dialects/test_presto.py::TestPresto::test_quotes", "tests/dialects/test_presto.py::TestPresto::test_regex", "tests/dialects/test_presto.py::TestPresto::test_time", "tests/dialects/test_presto.py::TestPresto::test_to_char", "tests/dialects/test_presto.py::TestPresto::test_unnest", "tests/dialects/test_snowflake.py::TestSnowflake::test_ddl", "tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table", "tests/dialects/test_snowflake.py::TestSnowflake::test_flatten", "tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data", "tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize", "tests/dialects/test_snowflake.py::TestSnowflake::test_minus", "tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment", "tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any", "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace", "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr", "tests/dialects/test_snowflake.py::TestSnowflake::test_sample", "tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types", "tests/dialects/test_snowflake.py::TestSnowflake::test_show", "tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files", "tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures", "tests/dialects/test_snowflake.py::TestSnowflake::test_swap", "tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal", "tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps", "tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast", "tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions", "tests/dialects/test_snowflake.py::TestSnowflake::test_values" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-12-18T13:00:25Z"
mit
tobymao__sqlglot-2701
diff --git a/sqlglot/dialects/dialect.py b/sqlglot/dialects/dialect.py index e9aa45db..b7eef451 100644 --- a/sqlglot/dialects/dialect.py +++ b/sqlglot/dialects/dialect.py @@ -126,6 +126,7 @@ class _Dialect(type): klass.BIT_START, klass.BIT_END = get_start_end(TokenType.BIT_STRING) klass.HEX_START, klass.HEX_END = get_start_end(TokenType.HEX_STRING) klass.BYTE_START, klass.BYTE_END = get_start_end(TokenType.BYTE_STRING) + klass.UNICODE_START, klass.UNICODE_END = get_start_end(TokenType.UNICODE_STRING) if enum not in ("", "bigquery"): klass.generator_class.SELECT_KINDS = () @@ -240,13 +241,15 @@ class Dialect(metaclass=_Dialect): IDENTIFIER_START = '"' IDENTIFIER_END = '"' - # Delimiters for bit, hex and byte literals + # Delimiters for bit, hex, byte and unicode literals BIT_START: t.Optional[str] = None BIT_END: t.Optional[str] = None HEX_START: t.Optional[str] = None HEX_END: t.Optional[str] = None BYTE_START: t.Optional[str] = None BYTE_END: t.Optional[str] = None + UNICODE_START: t.Optional[str] = None + UNICODE_END: t.Optional[str] = None @classmethod def get_or_raise(cls, dialect: DialectType) -> Dialect: diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py index 88f4f539..5e6d444d 100644 --- a/sqlglot/dialects/presto.py +++ b/sqlglot/dialects/presto.py @@ -222,6 +222,12 @@ class Presto(Dialect): NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE class Tokenizer(tokens.Tokenizer): + UNICODE_STRINGS = [ + (prefix + q, q) + for q in t.cast(t.List[str], tokens.Tokenizer.QUOTES) + for prefix in ("U&", "u&") + ] + KEYWORDS = { **tokens.Tokenizer.KEYWORDS, "START": TokenType.BEGIN, diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py index fca42d48..36bbcc50 100644 --- a/sqlglot/dialects/snowflake.py +++ b/sqlglot/dialects/snowflake.py @@ -33,6 +33,21 @@ def _check_int(s: str) -> bool: return s.isdigit() +def _parse_to_array(args: t.List) -> exp.Expression: + arg = seq_get(args, 0) + if isinstance(arg, exp.Expression): + from sqlglot.optimizer.annotate_types import annotate_types + + # https://docs.snowflake.com/en/sql-reference/functions/to_array + arg = annotate_types(arg) + if arg.is_type(exp.DataType.Type.ARRAY): + return arg + if arg.is_type(exp.DataType.Type.VARIANT): + return exp.Anonymous(this="TO_ARRAY", expressions=[arg]) + + return exp.Array.from_arg_list(args) + + # from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html def _parse_to_timestamp(args: t.List) -> t.Union[exp.StrToTime, exp.UnixToTime, exp.TimeStrToTime]: if len(args) == 2: @@ -293,7 +308,7 @@ class Snowflake(Dialect): "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), "TIMEDIFF": _parse_datediff, "TIMESTAMPDIFF": _parse_datediff, - "TO_ARRAY": exp.Array.from_arg_list, + "TO_ARRAY": _parse_to_array, "TO_TIMESTAMP": _parse_to_timestamp, "TO_VARCHAR": exp.ToChar.from_arg_list, "ZEROIFNULL": _zeroifnull_to_if, diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index 6990344e..6179b0c7 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -1206,6 +1206,10 @@ class RawString(Condition): pass +class UnicodeString(Condition): + arg_types = {"this": True, "escape": False} + + class Column(Condition): arg_types = {"this": True, "table": False, "db": False, "catalog": False, "join_mark": False} @@ -1960,7 +1964,12 @@ class Offset(Expression): class Order(Expression): - arg_types = {"this": False, "expressions": True} + arg_types = {"this": False, "expressions": True, "interpolate": False} + + 
+# https://clickhouse.com/docs/en/sql-reference/statements/select/order-by#order-by-expr-with-fill-modifier +class WithFill(Expression): + arg_types = {"from": False, "to": False, "step": False} # hive specific sorts @@ -1978,7 +1987,7 @@ class Sort(Order): class Ordered(Expression): - arg_types = {"this": True, "desc": False, "nulls_first": True} + arg_types = {"this": True, "desc": False, "nulls_first": True, "with_fill": False} class Property(Expression): diff --git a/sqlglot/generator.py b/sqlglot/generator.py index 665538eb..0aac498d 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -915,6 +915,14 @@ class Generator: return f"{self.dialect.BYTE_START}{this}{self.dialect.BYTE_END}" return this + def unicodestring_sql(self, expression: exp.UnicodeString) -> str: + this = self.sql(expression, "this") + if self.dialect.UNICODE_START: + escape = self.sql(expression, "escape") + escape = f" UESCAPE {escape}" if escape else "" + return f"{self.dialect.UNICODE_START}{this}{self.dialect.UNICODE_END}{escape}" + return this + def rawstring_sql(self, expression: exp.RawString) -> str: string = self.escape_str(expression.this.replace("\\", "\\\\")) return f"{self.dialect.QUOTE_START}{string}{self.dialect.QUOTE_END}" @@ -1786,7 +1794,24 @@ class Generator: def order_sql(self, expression: exp.Order, flat: bool = False) -> str: this = self.sql(expression, "this") this = f"{this} " if this else this - return self.op_expressions(f"{this}ORDER BY", expression, flat=this or flat) # type: ignore + order = self.op_expressions(f"{this}ORDER BY", expression, flat=this or flat) # type: ignore + interpolated_values = [ + f"{self.sql(named_expression, 'alias')} AS {self.sql(named_expression, 'this')}" + for named_expression in expression.args.get("interpolate") or [] + ] + interpolate = ( + f" INTERPOLATE ({', '.join(interpolated_values)})" if interpolated_values else "" + ) + return f"{order}{interpolate}" + + def withfill_sql(self, expression: exp.WithFill) -> str: + from_sql = self.sql(expression, "from") + from_sql = f" FROM {from_sql}" if from_sql else "" + to_sql = self.sql(expression, "to") + to_sql = f" TO {to_sql}" if to_sql else "" + step_sql = self.sql(expression, "step") + step_sql = f" STEP {step_sql}" if step_sql else "" + return f"WITH FILL{from_sql}{to_sql}{step_sql}" def cluster_sql(self, expression: exp.Cluster) -> str: return self.op_expressions("CLUSTER BY", expression) @@ -1828,7 +1853,10 @@ class Generator: this = f"CASE WHEN {this} IS NULL THEN 1 ELSE 0 END{null_sort_order}, {this}" nulls_sort_change = "" - return f"{this}{sort_order}{nulls_sort_change}" + with_fill = self.sql(expression, "with_fill") + with_fill = f" {with_fill}" if with_fill else "" + + return f"{this}{sort_order}{nulls_sort_change}{with_fill}" def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str: partition = self.partition_by_sql(expression) diff --git a/sqlglot/parser.py b/sqlglot/parser.py index bee2cff8..e9e9cc56 100644 --- a/sqlglot/parser.py +++ b/sqlglot/parser.py @@ -635,6 +635,11 @@ class Parser(metaclass=_Parser): TokenType.HEREDOC_STRING: lambda self, token: self.expression( exp.RawString, this=token.text ), + TokenType.UNICODE_STRING: lambda self, token: self.expression( + exp.UnicodeString, + this=token.text, + escape=self._match_text_seq("UESCAPE") and self._parse_string(), + ), TokenType.SESSION_PARAMETER: lambda self, _: self._parse_session_parameter(), } @@ -2463,13 +2468,7 @@ class Parser(metaclass=_Parser): pattern = None define = ( - self._parse_csv( - lambda: 
self.expression( - exp.Alias, - alias=self._parse_id_var(any_token=True), - this=self._match(TokenType.ALIAS) and self._parse_conjunction(), - ) - ) + self._parse_csv(self._parse_name_as_expression) if self._match_text_seq("DEFINE") else None ) @@ -3116,6 +3115,18 @@ class Parser(metaclass=_Parser): return self.expression(exp.Connect, start=start, connect=connect) + def _parse_name_as_expression(self) -> exp.Alias: + return self.expression( + exp.Alias, + alias=self._parse_id_var(any_token=True), + this=self._match(TokenType.ALIAS) and self._parse_conjunction(), + ) + + def _parse_interpolate(self) -> t.Optional[t.List[exp.Expression]]: + if self._match_text_seq("INTERPOLATE"): + return self._parse_wrapped_csv(self._parse_name_as_expression) + return None + def _parse_order( self, this: t.Optional[exp.Expression] = None, skip_order_token: bool = False ) -> t.Optional[exp.Expression]: @@ -3123,7 +3134,10 @@ class Parser(metaclass=_Parser): return this return self.expression( - exp.Order, this=this, expressions=self._parse_csv(self._parse_ordered) + exp.Order, + this=this, + expressions=self._parse_csv(self._parse_ordered), + interpolate=self._parse_interpolate(), ) def _parse_sort(self, exp_class: t.Type[E], token: TokenType) -> t.Optional[E]: @@ -3153,7 +3167,21 @@ class Parser(metaclass=_Parser): ): nulls_first = True - return self.expression(exp.Ordered, this=this, desc=desc, nulls_first=nulls_first) + if self._match_text_seq("WITH", "FILL"): + with_fill = self.expression( + exp.WithFill, + **{ # type: ignore + "from": self._match(TokenType.FROM) and self._parse_bitwise(), + "to": self._match_text_seq("TO") and self._parse_bitwise(), + "step": self._match_text_seq("STEP") and self._parse_bitwise(), + }, + ) + else: + with_fill = None + + return self.expression( + exp.Ordered, this=this, desc=desc, nulls_first=nulls_first, with_fill=with_fill + ) def _parse_limit( self, this: t.Optional[exp.Expression] = None, top: bool = False @@ -3599,7 +3627,7 @@ class Parser(metaclass=_Parser): exp.DataType, this=exp.DataType.Type.INTERVAL, expressions=span ) else: - this = self.expression(exp.Interval, unit=unit) + this = self.expression(exp.DataType, this=self.expression(exp.Interval, unit=unit)) if maybe_func and check_func: index2 = self._index diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py index aaeafb1c..de9d4c4a 100644 --- a/sqlglot/tokens.py +++ b/sqlglot/tokens.py @@ -97,6 +97,7 @@ class TokenType(AutoName): NATIONAL_STRING = auto() RAW_STRING = auto() HEREDOC_STRING = auto() + UNICODE_STRING = auto() # types BIT = auto() @@ -450,6 +451,7 @@ class _Tokenizer(type): **_quotes_to_format(TokenType.HEX_STRING, klass.HEX_STRINGS), **_quotes_to_format(TokenType.RAW_STRING, klass.RAW_STRINGS), **_quotes_to_format(TokenType.HEREDOC_STRING, klass.HEREDOC_STRINGS), + **_quotes_to_format(TokenType.UNICODE_STRING, klass.UNICODE_STRINGS), } klass._STRING_ESCAPES = set(klass.STRING_ESCAPES) @@ -557,6 +559,7 @@ class Tokenizer(metaclass=_Tokenizer): HEX_STRINGS: t.List[str | t.Tuple[str, str]] = [] RAW_STRINGS: t.List[str | t.Tuple[str, str]] = [] HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = [] + UNICODE_STRINGS: t.List[str | t.Tuple[str, str]] = [] IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"'] IDENTIFIER_ESCAPES = ['"'] QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
tobymao/sqlglot
0d6831aecfee75dcb4ed74dab37b7dd2b304c6f6
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py index e56bdabb..1f528b62 100644 --- a/tests/dialects/test_clickhouse.py +++ b/tests/dialects/test_clickhouse.py @@ -70,6 +70,18 @@ class TestClickhouse(Validator): self.validate_identity("CAST(x AS DATETIME)") self.validate_identity("CAST(x as MEDIUMINT)", "CAST(x AS Int32)") self.validate_identity("SELECT arrayJoin([1, 2, 3] AS src) AS dst, 'Hello', src") + self.validate_identity( + "SELECT n, source FROM (SELECT toFloat32(number % 10) AS n, 'original' AS source FROM numbers(10) WHERE number % 3 = 1) ORDER BY n WITH FILL" + ) + self.validate_identity( + "SELECT n, source FROM (SELECT toFloat32(number % 10) AS n, 'original' AS source FROM numbers(10) WHERE number % 3 = 1) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5" + ) + self.validate_identity( + "SELECT toDate((number * 10) * 86400) AS d1, toDate(number * 86400) AS d2, 'original' AS source FROM numbers(10) WHERE (number % 3) = 1 ORDER BY d2 WITH FILL, d1 WITH FILL STEP 5" + ) + self.validate_identity( + "SELECT n, source, inter FROM (SELECT toFloat32(number % 10) AS n, 'original' AS source, number AS inter FROM numbers(10) WHERE number % 3 = 1) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5 INTERPOLATE (inter AS inter + 1)" + ) self.validate_identity( "SELECT SUM(1) AS impressions, arrayJoin(cities) AS city, arrayJoin(browsers) AS browser FROM (SELECT ['Istanbul', 'Berlin', 'Bobruisk'] AS cities, ['Firefox', 'Chrome', 'Chrome'] AS browsers) GROUP BY 2, 3" ) diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py index 0332ae1a..91556960 100644 --- a/tests/dialects/test_postgres.py +++ b/tests/dialects/test_postgres.py @@ -8,6 +8,11 @@ class TestPostgres(Validator): dialect = "postgres" def test_ddl(self): + expr = parse_one("CREATE TABLE t (x INTERVAL day)", read="postgres") + cdef = expr.find(exp.ColumnDef) + cdef.args["kind"].assert_is(exp.DataType) + self.assertEqual(expr.sql(dialect="postgres"), "CREATE TABLE t (x INTERVAL day)") + self.validate_identity("CREATE INDEX idx_x ON x USING BTREE(x, y) WHERE (NOT y IS NULL)") self.validate_identity("CREATE TABLE test (elems JSONB[])") self.validate_identity("CREATE TABLE public.y (x TSTZRANGE NOT NULL)") diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py index ad85ddc6..a9d88cb0 100644 --- a/tests/dialects/test_presto.py +++ b/tests/dialects/test_presto.py @@ -544,26 +544,18 @@ class TestPresto(Validator): }, ) - def test_presto(self): - self.validate_identity("string_agg(x, ',')", "ARRAY_JOIN(ARRAY_AGG(x), ',')") - self.validate_identity( - "SELECT * FROM example.testdb.customer_orders FOR VERSION AS OF 8954597067493422955" - ) - self.validate_identity( - "SELECT * FROM example.testdb.customer_orders FOR TIMESTAMP AS OF CAST('2022-03-23 09:59:29.803 Europe/Vienna' AS TIMESTAMP)" - ) - - self.validate_identity("SELECT * FROM x OFFSET 1 LIMIT 1") - self.validate_identity("SELECT * FROM x OFFSET 1 FETCH FIRST 1 ROWS ONLY") - self.validate_identity("SELECT BOOL_OR(a > 10) FROM asd AS T(a)") - self.validate_identity("SELECT * FROM (VALUES (1))") - self.validate_identity("START TRANSACTION READ WRITE, ISOLATION LEVEL SERIALIZABLE") - self.validate_identity("START TRANSACTION ISOLATION LEVEL REPEATABLE READ") - self.validate_identity("APPROX_PERCENTILE(a, b, c, d)") - self.validate_identity( - "SELECT SPLIT_TO_MAP('a:1;b:2;a:3', ';', ':', (k, v1, v2) -> CONCAT(v1, v2))" - ) + def test_unicode_string(self): + for prefix in ("u&", "U&"): + self.validate_identity( + 
f"{prefix}'Hello winter \\2603 !'", + "U&'Hello winter \\2603 !'", + ) + self.validate_identity( + f"{prefix}'Hello winter #2603 !' UESCAPE '#'", + "U&'Hello winter #2603 !' UESCAPE '#'", + ) + def test_presto(self): with self.assertLogs(helper_logger) as cm: self.validate_all( "SELECT COALESCE(ELEMENT_AT(MAP_FROM_ENTRIES(ARRAY[(51, '1')]), id), quantity) FROM my_table", @@ -582,6 +574,24 @@ class TestPresto(Validator): }, ) + self.validate_identity("string_agg(x, ',')", "ARRAY_JOIN(ARRAY_AGG(x), ',')") + self.validate_identity("SELECT * FROM x OFFSET 1 LIMIT 1") + self.validate_identity("SELECT * FROM x OFFSET 1 FETCH FIRST 1 ROWS ONLY") + self.validate_identity("SELECT BOOL_OR(a > 10) FROM asd AS T(a)") + self.validate_identity("SELECT * FROM (VALUES (1))") + self.validate_identity("START TRANSACTION READ WRITE, ISOLATION LEVEL SERIALIZABLE") + self.validate_identity("START TRANSACTION ISOLATION LEVEL REPEATABLE READ") + self.validate_identity("APPROX_PERCENTILE(a, b, c, d)") + self.validate_identity( + "SELECT SPLIT_TO_MAP('a:1;b:2;a:3', ';', ':', (k, v1, v2) -> CONCAT(v1, v2))" + ) + self.validate_identity( + "SELECT * FROM example.testdb.customer_orders FOR VERSION AS OF 8954597067493422955" + ) + self.validate_identity( + "SELECT * FROM example.testdb.customer_orders FOR TIMESTAMP AS OF CAST('2022-03-23 09:59:29.803 Europe/Vienna' AS TIMESTAMP)" + ) + self.validate_all( "SELECT MAX_BY(a.id, a.timestamp) FROM a", read={ diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py index 13f32c13..4c2d7ca7 100644 --- a/tests/dialects/test_snowflake.py +++ b/tests/dialects/test_snowflake.py @@ -127,11 +127,45 @@ WHERE "SELECT TO_TIMESTAMP(x) FROM t", "SELECT CAST(x AS TIMESTAMPNTZ) FROM t", ) + self.validate_identity( + "CAST(x AS BYTEINT)", + "CAST(x AS INT)", + ) + self.validate_identity( + "CAST(x AS CHAR VARYING)", + "CAST(x AS VARCHAR)", + ) + self.validate_identity( + "CAST(x AS CHARACTER VARYING)", + "CAST(x AS VARCHAR)", + ) + self.validate_identity( + "CAST(x AS NCHAR VARYING)", + "CAST(x AS VARCHAR)", + ) + self.validate_identity( + "SELECT TO_ARRAY(x::ARRAY)", + "SELECT CAST(x AS ARRAY)", + ) + self.validate_identity( + "SELECT TO_ARRAY(['test']::VARIANT)", + "SELECT TO_ARRAY(CAST(['test'] AS VARIANT))", + ) - self.validate_all("CAST(x AS BYTEINT)", write={"snowflake": "CAST(x AS INT)"}) - self.validate_all("CAST(x AS CHAR VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"}) - self.validate_all("CAST(x AS CHARACTER VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"}) - self.validate_all("CAST(x AS NCHAR VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"}) + self.validate_all( + "SELECT TO_ARRAY(['test'])", + write={ + "snowflake": "SELECT ['test']", + "spark": "SELECT ARRAY('test')", + }, + ) + self.validate_all( + "SELECT TO_ARRAY(['test'])", + write={ + "snowflake": "SELECT ['test']", + "spark": "SELECT ARRAY('test')", + }, + ) self.validate_all( # We need to qualify the columns in this query because "value" would be ambiguous 'WITH t(x, "value") AS (SELECT [1, 2, 3], 1) SELECT IFF(_u.pos = _u_2.pos_2, _u_2."value", NULL) AS "value" FROM t, TABLE(FLATTEN(INPUT => ARRAY_GENERATE_RANGE(0, (GREATEST(ARRAY_SIZE(t.x)) - 1) + 1))) AS _u(seq, key, path, index, pos, this) CROSS JOIN TABLE(FLATTEN(INPUT => t.x)) AS _u_2(seq, key, path, pos_2, "value", this) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > (ARRAY_SIZE(t.x) - 1) AND _u_2.pos_2 = (ARRAY_SIZE(t.x) - 1))',
ClickHouse: ORDER BY WITH FILL not working

Hey sqlglot team,

I am trying to integrate sqlglot further into one of my recent open source libraries, [SQL Mock](https://github.com/DeepLcom/sql-mock). It is already used for some functionality, but I want to extend its usage. While testing locally, I identified that the ClickHouse `ORDER BY WITH FILL` [syntax](https://clickhouse.com/docs/en/sql-reference/statements/select/order-by#order-by-expr-with-fill-modifier) causes a parsing error:

**Fully reproducible code snippet**

```python
import sqlglot

query = """
SELECT
    toLastDayOfMonth(date) AS observation_month
FROM (
    SELECT toStartOfMonth(toDate('2023-01-01') - INTERVAL 1 MONTH) AS date
    ORDER BY date WITH FILL FROM toDate('2021-01-01') STEP INTERVAL 1 MONTH
)
"""
ast = sqlglot.parse_one(query, dialect='clickhouse')
```

This results in:

```bash
ParseError: Expecting ). Line 8, Col: 10.
  start date we care about and fill the rest from the period_start
    ORDER BY date WITH FILL FROM toDate('2021-01-01') -- First day we will report on
    STEP
```

**Official Documentation**
* [ORDER BY Expr WITH FILL Modifier Documentation](https://clickhouse.com/docs/en/sql-reference/statements/select/order-by#order-by-expr-with-fill-modifier)
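After the fix, a parse/regenerate round trip should confirm the modifier survives. A minimal sketch, reusing one of the exact strings from the new `validate_identity` tests in the test patch above (not a query from the original report):

```python
import sqlglot

# One of the WITH FILL forms exercised by the new ClickHouse tests.
sql = "SELECT n, source FROM (SELECT toFloat32(number % 10) AS n, 'original' AS source FROM numbers(10) WHERE number % 3 = 1) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5"
ast = sqlglot.parse_one(sql, dialect="clickhouse")
print(ast.sql(dialect="clickhouse") == sql)  # expected: True, per validate_identity
```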
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse", "tests/dialects/test_postgres.py::TestPostgres::test_ddl", "tests/dialects/test_presto.py::TestPresto::test_unicode_string", "tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake" ]
[ "tests/dialects/test_clickhouse.py::TestClickhouse::test_cte", "tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl", "tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization", "tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types", "tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary", "tests/dialects/test_postgres.py::TestPostgres::test_array_offset", "tests/dialects/test_postgres.py::TestPostgres::test_bool_or", "tests/dialects/test_postgres.py::TestPostgres::test_operator", "tests/dialects/test_postgres.py::TestPostgres::test_postgres", "tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary", "tests/dialects/test_postgres.py::TestPostgres::test_string_concat", "tests/dialects/test_postgres.py::TestPostgres::test_unnest", "tests/dialects/test_postgres.py::TestPostgres::test_variance", "tests/dialects/test_presto.py::TestPresto::test_cast", "tests/dialects/test_presto.py::TestPresto::test_ddl", "tests/dialects/test_presto.py::TestPresto::test_encode_decode", "tests/dialects/test_presto.py::TestPresto::test_hex_unhex", "tests/dialects/test_presto.py::TestPresto::test_interval_plural_to_singular", "tests/dialects/test_presto.py::TestPresto::test_json", "tests/dialects/test_presto.py::TestPresto::test_match_recognize", "tests/dialects/test_presto.py::TestPresto::test_presto", "tests/dialects/test_presto.py::TestPresto::test_quotes", "tests/dialects/test_presto.py::TestPresto::test_regex", "tests/dialects/test_presto.py::TestPresto::test_time", "tests/dialects/test_presto.py::TestPresto::test_to_char", "tests/dialects/test_presto.py::TestPresto::test_unnest", "tests/dialects/test_snowflake.py::TestSnowflake::test_ddl", "tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table", "tests/dialects/test_snowflake.py::TestSnowflake::test_flatten", "tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data", "tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize", "tests/dialects/test_snowflake.py::TestSnowflake::test_minus", "tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment", "tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any", "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace", "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr", "tests/dialects/test_snowflake.py::TestSnowflake::test_sample", "tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types", "tests/dialects/test_snowflake.py::TestSnowflake::test_show", "tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files", "tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures", "tests/dialects/test_snowflake.py::TestSnowflake::test_swap", "tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal", "tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps", "tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast", "tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions", "tests/dialects/test_snowflake.py::TestSnowflake::test_values" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-12-18T15:11:32Z"
mit
tobymao__sqlglot-2709
diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml index b92925dc..fd418b6f 100644 --- a/.github/workflows/python-publish.yml +++ b/.github/workflows/python-publish.yml @@ -80,7 +80,6 @@ jobs: with: command: upload args: --non-interactive --skip-existing * - working-directory: ./sqlglot/sqlglotrs deploy: runs-on: ubuntu-latest diff --git a/sqlglot/generator.py b/sqlglot/generator.py index c571e8fb..b0e83d21 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -1,6 +1,7 @@ from __future__ import annotations import logging +import re import typing as t from collections import defaultdict from functools import reduce @@ -17,6 +18,8 @@ if t.TYPE_CHECKING: logger = logging.getLogger("sqlglot") +ESCAPED_UNICODE_RE = re.compile(r"\\(\d+)") + class Generator: """ @@ -917,11 +920,19 @@ class Generator: def unicodestring_sql(self, expression: exp.UnicodeString) -> str: this = self.sql(expression, "this") + escape = expression.args.get("escape") + if self.dialect.UNICODE_START: - escape = self.sql(expression, "escape") - escape = f" UESCAPE {escape}" if escape else "" + escape = f" UESCAPE {self.sql(escape)}" if escape else "" return f"{self.dialect.UNICODE_START}{this}{self.dialect.UNICODE_END}{escape}" - return this + + if escape: + pattern = re.compile(rf"{escape.name}(\d+)") + else: + pattern = ESCAPED_UNICODE_RE + + this = pattern.sub(r"\\u\1", this) + return f"{self.dialect.QUOTE_START}{this}{self.dialect.QUOTE_END}" def rawstring_sql(self, expression: exp.RawString) -> str: string = self.escape_str(expression.this.replace("\\", "\\\\"))
tobymao/sqlglot
1c95b1e6fcd3c1de534266b379058a1bad85c29e
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py index 97a387c6..8b5080c9 100644 --- a/tests/dialects/test_presto.py +++ b/tests/dialects/test_presto.py @@ -546,13 +546,21 @@ class TestPresto(Validator): def test_unicode_string(self): for prefix in ("u&", "U&"): - self.validate_identity( + self.validate_all( f"{prefix}'Hello winter \\2603 !'", - "U&'Hello winter \\2603 !'", + write={ + "presto": "U&'Hello winter \\2603 !'", + "snowflake": "'Hello winter \\u2603 !'", + "spark": "'Hello winter \\u2603 !'", + }, ) - self.validate_identity( + self.validate_all( f"{prefix}'Hello winter #2603 !' UESCAPE '#'", - "U&'Hello winter #2603 !' UESCAPE '#'", + write={ + "presto": "U&'Hello winter #2603 !' UESCAPE '#'", + "snowflake": "'Hello winter \\u2603 !'", + "spark": "'Hello winter \\u2603 !'", + }, ) def test_presto(self):
Unicode character escape is not correctly converted from Trino

Trino uses a `U&` prefix to indicate that a string has Unicode characters escaped with a `\`.
https://trino.io/docs/current/language/types.html#varchar

Spark SQL doesn't use a prefix, but escapes Unicode characters with a `\u`.
https://spark.apache.org/docs/latest/sql-ref-literals.html#parameters

**Fully reproducible code snippet**

```
import sqlglot

in_sql = '''select U&'n\00e3o' as no'''
out_sql = sqlglot.transpile(in_sql, read='trino', write='spark', pretty=True)[0]
print(out_sql)
```

**Output**

```
SELECT U & 'ne3o' AS no
```

**Expected output**

```
SELECT 'n\u00e3o' AS no
```
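The patch above handles this by rewriting numeric escapes whenever the target dialect has no `U&` prefix. The core substitution can be sketched in isolation; this is a simplified illustration of `ESCAPED_UNICODE_RE` from the patch, not the full generator logic:

```python
import re

# Mirrors ESCAPED_UNICODE_RE in the patch: a backslash followed by decimal
# digits gains a "u" after the backslash. Trailing hex letters (e.g. the
# "e3" in \00e3) are simply left in place, which still yields \u00e3.
ESCAPED_UNICODE_RE = re.compile(r"\\(\d+)")

body = "n\\00e3o"  # the body of U&'n\00e3o'
print(ESCAPED_UNICODE_RE.sub(r"\\u\1", body))  # n\u00e3o
```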
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_presto.py::TestPresto::test_unicode_string" ]
[ "tests/dialects/test_presto.py::TestPresto::test_cast", "tests/dialects/test_presto.py::TestPresto::test_ddl", "tests/dialects/test_presto.py::TestPresto::test_encode_decode", "tests/dialects/test_presto.py::TestPresto::test_hex_unhex", "tests/dialects/test_presto.py::TestPresto::test_interval_plural_to_singular", "tests/dialects/test_presto.py::TestPresto::test_json", "tests/dialects/test_presto.py::TestPresto::test_match_recognize", "tests/dialects/test_presto.py::TestPresto::test_presto", "tests/dialects/test_presto.py::TestPresto::test_quotes", "tests/dialects/test_presto.py::TestPresto::test_regex", "tests/dialects/test_presto.py::TestPresto::test_time", "tests/dialects/test_presto.py::TestPresto::test_to_char", "tests/dialects/test_presto.py::TestPresto::test_unnest" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-12-19T19:05:11Z"
mit
tobymao__sqlglot-2739
diff --git a/sqlglot/transforms.py b/sqlglot/transforms.py index 03acc2b2..0da65b51 100644 --- a/sqlglot/transforms.py +++ b/sqlglot/transforms.py @@ -255,7 +255,7 @@ def explode_to_unnest(index_offset: int = 0) -> t.Callable[[exp.Expression], exp if not arrays: if expression.args.get("from"): - expression.join(series, copy=False) + expression.join(series, copy=False, join_type="CROSS") else: expression.from_(series, copy=False)
tobymao/sqlglot
1ebfb3688975e420a70bac10c49ad127446c4c65
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py index 191f24d8..81219886 100644 --- a/tests/dialects/test_bigquery.py +++ b/tests/dialects/test_bigquery.py @@ -368,7 +368,7 @@ class TestBigQuery(Validator): }, ) self.validate_all( - "WITH cte AS (SELECT [1, 2, 3] AS arr) SELECT IF(pos = pos_2, col, NULL) AS col FROM cte, UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH(arr)) - 1)) AS pos CROSS JOIN UNNEST(arr) AS col WITH OFFSET AS pos_2 WHERE pos = pos_2 OR (pos > (ARRAY_LENGTH(arr) - 1) AND pos_2 = (ARRAY_LENGTH(arr) - 1))", + "WITH cte AS (SELECT [1, 2, 3] AS arr) SELECT IF(pos = pos_2, col, NULL) AS col FROM cte CROSS JOIN UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH(arr)) - 1)) AS pos CROSS JOIN UNNEST(arr) AS col WITH OFFSET AS pos_2 WHERE pos = pos_2 OR (pos > (ARRAY_LENGTH(arr) - 1) AND pos_2 = (ARRAY_LENGTH(arr) - 1))", read={ "spark": "WITH cte AS (SELECT ARRAY(1, 2, 3) AS arr) SELECT EXPLODE(arr) FROM cte" }, diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py index 543739a1..021349b1 100644 --- a/tests/dialects/test_duckdb.py +++ b/tests/dialects/test_duckdb.py @@ -103,8 +103,8 @@ class TestDuckDB(Validator): self.validate_all( "SELECT UNNEST(ARRAY[1, 2, 3]), UNNEST(ARRAY[4, 5]), UNNEST(ARRAY[6]) FROM x", write={ - "bigquery": "SELECT IF(pos = pos_2, col, NULL) AS col, IF(pos = pos_3, col_2, NULL) AS col_2, IF(pos = pos_4, col_3, NULL) AS col_3 FROM x, UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH([1, 2, 3]), ARRAY_LENGTH([4, 5]), ARRAY_LENGTH([6])) - 1)) AS pos CROSS JOIN UNNEST([1, 2, 3]) AS col WITH OFFSET AS pos_2 CROSS JOIN UNNEST([4, 5]) AS col_2 WITH OFFSET AS pos_3 CROSS JOIN UNNEST([6]) AS col_3 WITH OFFSET AS pos_4 WHERE ((pos = pos_2 OR (pos > (ARRAY_LENGTH([1, 2, 3]) - 1) AND pos_2 = (ARRAY_LENGTH([1, 2, 3]) - 1))) AND (pos = pos_3 OR (pos > (ARRAY_LENGTH([4, 5]) - 1) AND pos_3 = (ARRAY_LENGTH([4, 5]) - 1)))) AND (pos = pos_4 OR (pos > (ARRAY_LENGTH([6]) - 1) AND pos_4 = (ARRAY_LENGTH([6]) - 1)))", - "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col, IF(_u.pos = _u_3.pos_3, _u_3.col_2) AS col_2, IF(_u.pos = _u_4.pos_4, _u_4.col_3) AS col_3 FROM x, UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(ARRAY[1, 2, 3]), CARDINALITY(ARRAY[4, 5]), CARDINALITY(ARRAY[6])))) AS _u(pos) CROSS JOIN UNNEST(ARRAY[1, 2, 3]) WITH ORDINALITY AS _u_2(col, pos_2) CROSS JOIN UNNEST(ARRAY[4, 5]) WITH ORDINALITY AS _u_3(col_2, pos_3) CROSS JOIN UNNEST(ARRAY[6]) WITH ORDINALITY AS _u_4(col_3, pos_4) WHERE ((_u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(ARRAY[1, 2, 3]) AND _u_2.pos_2 = CARDINALITY(ARRAY[1, 2, 3]))) AND (_u.pos = _u_3.pos_3 OR (_u.pos > CARDINALITY(ARRAY[4, 5]) AND _u_3.pos_3 = CARDINALITY(ARRAY[4, 5])))) AND (_u.pos = _u_4.pos_4 OR (_u.pos > CARDINALITY(ARRAY[6]) AND _u_4.pos_4 = CARDINALITY(ARRAY[6])))", + "bigquery": "SELECT IF(pos = pos_2, col, NULL) AS col, IF(pos = pos_3, col_2, NULL) AS col_2, IF(pos = pos_4, col_3, NULL) AS col_3 FROM x CROSS JOIN UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH([1, 2, 3]), ARRAY_LENGTH([4, 5]), ARRAY_LENGTH([6])) - 1)) AS pos CROSS JOIN UNNEST([1, 2, 3]) AS col WITH OFFSET AS pos_2 CROSS JOIN UNNEST([4, 5]) AS col_2 WITH OFFSET AS pos_3 CROSS JOIN UNNEST([6]) AS col_3 WITH OFFSET AS pos_4 WHERE ((pos = pos_2 OR (pos > (ARRAY_LENGTH([1, 2, 3]) - 1) AND pos_2 = (ARRAY_LENGTH([1, 2, 3]) - 1))) AND (pos = pos_3 OR (pos > (ARRAY_LENGTH([4, 5]) - 1) AND pos_3 = (ARRAY_LENGTH([4, 5]) - 1)))) AND (pos = pos_4 OR (pos > (ARRAY_LENGTH([6]) - 1) AND pos_4 = (ARRAY_LENGTH([6]) - 1)))", + 
"presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col, IF(_u.pos = _u_3.pos_3, _u_3.col_2) AS col_2, IF(_u.pos = _u_4.pos_4, _u_4.col_3) AS col_3 FROM x CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(ARRAY[1, 2, 3]), CARDINALITY(ARRAY[4, 5]), CARDINALITY(ARRAY[6])))) AS _u(pos) CROSS JOIN UNNEST(ARRAY[1, 2, 3]) WITH ORDINALITY AS _u_2(col, pos_2) CROSS JOIN UNNEST(ARRAY[4, 5]) WITH ORDINALITY AS _u_3(col_2, pos_3) CROSS JOIN UNNEST(ARRAY[6]) WITH ORDINALITY AS _u_4(col_3, pos_4) WHERE ((_u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(ARRAY[1, 2, 3]) AND _u_2.pos_2 = CARDINALITY(ARRAY[1, 2, 3]))) AND (_u.pos = _u_3.pos_3 OR (_u.pos > CARDINALITY(ARRAY[4, 5]) AND _u_3.pos_3 = CARDINALITY(ARRAY[4, 5])))) AND (_u.pos = _u_4.pos_4 OR (_u.pos > CARDINALITY(ARRAY[6]) AND _u_4.pos_4 = CARDINALITY(ARRAY[6])))", }, ) self.validate_all( diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py index ed5823b2..882b7f04 100644 --- a/tests/dialects/test_postgres.py +++ b/tests/dialects/test_postgres.py @@ -158,7 +158,7 @@ class TestPostgres(Validator): write={ "hive": "SELECT EXPLODE(c) FROM t", "postgres": "SELECT UNNEST(c) FROM t", - "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col FROM t, UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(c)))) AS _u(pos) CROSS JOIN UNNEST(c) WITH ORDINALITY AS _u_2(col, pos_2) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(c) AND _u_2.pos_2 = CARDINALITY(c))", + "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col FROM t CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(c)))) AS _u(pos) CROSS JOIN UNNEST(c) WITH ORDINALITY AS _u_2(col, pos_2) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(c) AND _u_2.pos_2 = CARDINALITY(c))", }, ) self.validate_all( diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py index 4d8168ac..aa2cf363 100644 --- a/tests/dialects/test_snowflake.py +++ b/tests/dialects/test_snowflake.py @@ -162,7 +162,7 @@ WHERE ) self.validate_all( # We need to qualify the columns in this query because "value" would be ambiguous - 'WITH t(x, "value") AS (SELECT [1, 2, 3], 1) SELECT IFF(_u.pos = _u_2.pos_2, _u_2."value", NULL) AS "value" FROM t, TABLE(FLATTEN(INPUT => ARRAY_GENERATE_RANGE(0, (GREATEST(ARRAY_SIZE(t.x)) - 1) + 1))) AS _u(seq, key, path, index, pos, this) CROSS JOIN TABLE(FLATTEN(INPUT => t.x)) AS _u_2(seq, key, path, pos_2, "value", this) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > (ARRAY_SIZE(t.x) - 1) AND _u_2.pos_2 = (ARRAY_SIZE(t.x) - 1))', + 'WITH t(x, "value") AS (SELECT [1, 2, 3], 1) SELECT IFF(_u.pos = _u_2.pos_2, _u_2."value", NULL) AS "value" FROM t CROSS JOIN TABLE(FLATTEN(INPUT => ARRAY_GENERATE_RANGE(0, (GREATEST(ARRAY_SIZE(t.x)) - 1) + 1))) AS _u(seq, key, path, index, pos, this) CROSS JOIN TABLE(FLATTEN(INPUT => t.x)) AS _u_2(seq, key, path, pos_2, "value", this) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > (ARRAY_SIZE(t.x) - 1) AND _u_2.pos_2 = (ARRAY_SIZE(t.x) - 1))', read={ "duckdb": 'WITH t(x, "value") AS (SELECT [1,2,3], 1) SELECT UNNEST(t.x) AS "value" FROM t', }, diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py index 60c1a660..46e626cc 100644 --- a/tests/dialects/test_spark.py +++ b/tests/dialects/test_spark.py @@ -624,23 +624,23 @@ TBLPROPERTIES ( self.validate_all( "SELECT EXPLODE(x) FROM tbl", write={ - "bigquery": "SELECT IF(pos = pos_2, col, NULL) AS col FROM tbl, UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH(x)) - 1)) AS pos CROSS JOIN UNNEST(x) AS col WITH OFFSET AS pos_2 WHERE pos = pos_2 OR (pos > (ARRAY_LENGTH(x) - 1) AND pos_2 = 
(ARRAY_LENGTH(x) - 1))", - "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col FROM tbl, UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(x)))) AS _u(pos) CROSS JOIN UNNEST(x) WITH ORDINALITY AS _u_2(col, pos_2) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(x) AND _u_2.pos_2 = CARDINALITY(x))", + "bigquery": "SELECT IF(pos = pos_2, col, NULL) AS col FROM tbl CROSS JOIN UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH(x)) - 1)) AS pos CROSS JOIN UNNEST(x) AS col WITH OFFSET AS pos_2 WHERE pos = pos_2 OR (pos > (ARRAY_LENGTH(x) - 1) AND pos_2 = (ARRAY_LENGTH(x) - 1))", + "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col FROM tbl CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(x)))) AS _u(pos) CROSS JOIN UNNEST(x) WITH ORDINALITY AS _u_2(col, pos_2) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(x) AND _u_2.pos_2 = CARDINALITY(x))", "spark": "SELECT EXPLODE(x) FROM tbl", }, ) self.validate_all( "SELECT EXPLODE(col) FROM _u", write={ - "bigquery": "SELECT IF(pos = pos_2, col_2, NULL) AS col_2 FROM _u, UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH(col)) - 1)) AS pos CROSS JOIN UNNEST(col) AS col_2 WITH OFFSET AS pos_2 WHERE pos = pos_2 OR (pos > (ARRAY_LENGTH(col) - 1) AND pos_2 = (ARRAY_LENGTH(col) - 1))", - "presto": "SELECT IF(_u_2.pos = _u_3.pos_2, _u_3.col_2) AS col_2 FROM _u, UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(col)))) AS _u_2(pos) CROSS JOIN UNNEST(col) WITH ORDINALITY AS _u_3(col_2, pos_2) WHERE _u_2.pos = _u_3.pos_2 OR (_u_2.pos > CARDINALITY(col) AND _u_3.pos_2 = CARDINALITY(col))", + "bigquery": "SELECT IF(pos = pos_2, col_2, NULL) AS col_2 FROM _u CROSS JOIN UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH(col)) - 1)) AS pos CROSS JOIN UNNEST(col) AS col_2 WITH OFFSET AS pos_2 WHERE pos = pos_2 OR (pos > (ARRAY_LENGTH(col) - 1) AND pos_2 = (ARRAY_LENGTH(col) - 1))", + "presto": "SELECT IF(_u_2.pos = _u_3.pos_2, _u_3.col_2) AS col_2 FROM _u CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(col)))) AS _u_2(pos) CROSS JOIN UNNEST(col) WITH ORDINALITY AS _u_3(col_2, pos_2) WHERE _u_2.pos = _u_3.pos_2 OR (_u_2.pos > CARDINALITY(col) AND _u_3.pos_2 = CARDINALITY(col))", "spark": "SELECT EXPLODE(col) FROM _u", }, ) self.validate_all( "SELECT EXPLODE(col) AS exploded FROM schema.tbl", write={ - "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.exploded) AS exploded FROM schema.tbl, UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(col)))) AS _u(pos) CROSS JOIN UNNEST(col) WITH ORDINALITY AS _u_2(exploded, pos_2) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(col) AND _u_2.pos_2 = CARDINALITY(col))", + "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.exploded) AS exploded FROM schema.tbl CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(col)))) AS _u(pos) CROSS JOIN UNNEST(col) WITH ORDINALITY AS _u_2(exploded, pos_2) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(col) AND _u_2.pos_2 = CARDINALITY(col))", }, ) self.validate_all( @@ -666,13 +666,13 @@ TBLPROPERTIES ( self.validate_all( "SELECT POSEXPLODE(ARRAY(2, 3)), EXPLODE(ARRAY(4, 5, 6)) FROM tbl", write={ - "bigquery": "SELECT IF(pos = pos_2, col, NULL) AS col, IF(pos = pos_2, pos_2, NULL) AS pos_2, IF(pos = pos_3, col_2, NULL) AS col_2 FROM tbl, UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH([2, 3]), ARRAY_LENGTH([4, 5, 6])) - 1)) AS pos CROSS JOIN UNNEST([2, 3]) AS col WITH OFFSET AS pos_2 CROSS JOIN UNNEST([4, 5, 6]) AS col_2 WITH OFFSET AS pos_3 WHERE (pos = pos_2 OR (pos > (ARRAY_LENGTH([2, 3]) - 1) AND pos_2 = (ARRAY_LENGTH([2, 3]) - 1))) AND (pos = pos_3 OR (pos > (ARRAY_LENGTH([4, 5, 6]) - 1) AND pos_3 = 
(ARRAY_LENGTH([4, 5, 6]) - 1)))", - "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col, IF(_u.pos = _u_2.pos_2, _u_2.pos_2) AS pos_2, IF(_u.pos = _u_3.pos_3, _u_3.col_2) AS col_2 FROM tbl, UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(ARRAY[2, 3]), CARDINALITY(ARRAY[4, 5, 6])))) AS _u(pos) CROSS JOIN UNNEST(ARRAY[2, 3]) WITH ORDINALITY AS _u_2(col, pos_2) CROSS JOIN UNNEST(ARRAY[4, 5, 6]) WITH ORDINALITY AS _u_3(col_2, pos_3) WHERE (_u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(ARRAY[2, 3]) AND _u_2.pos_2 = CARDINALITY(ARRAY[2, 3]))) AND (_u.pos = _u_3.pos_3 OR (_u.pos > CARDINALITY(ARRAY[4, 5, 6]) AND _u_3.pos_3 = CARDINALITY(ARRAY[4, 5, 6])))", + "bigquery": "SELECT IF(pos = pos_2, col, NULL) AS col, IF(pos = pos_2, pos_2, NULL) AS pos_2, IF(pos = pos_3, col_2, NULL) AS col_2 FROM tbl CROSS JOIN UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH([2, 3]), ARRAY_LENGTH([4, 5, 6])) - 1)) AS pos CROSS JOIN UNNEST([2, 3]) AS col WITH OFFSET AS pos_2 CROSS JOIN UNNEST([4, 5, 6]) AS col_2 WITH OFFSET AS pos_3 WHERE (pos = pos_2 OR (pos > (ARRAY_LENGTH([2, 3]) - 1) AND pos_2 = (ARRAY_LENGTH([2, 3]) - 1))) AND (pos = pos_3 OR (pos > (ARRAY_LENGTH([4, 5, 6]) - 1) AND pos_3 = (ARRAY_LENGTH([4, 5, 6]) - 1)))", + "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col, IF(_u.pos = _u_2.pos_2, _u_2.pos_2) AS pos_2, IF(_u.pos = _u_3.pos_3, _u_3.col_2) AS col_2 FROM tbl CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(ARRAY[2, 3]), CARDINALITY(ARRAY[4, 5, 6])))) AS _u(pos) CROSS JOIN UNNEST(ARRAY[2, 3]) WITH ORDINALITY AS _u_2(col, pos_2) CROSS JOIN UNNEST(ARRAY[4, 5, 6]) WITH ORDINALITY AS _u_3(col_2, pos_3) WHERE (_u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(ARRAY[2, 3]) AND _u_2.pos_2 = CARDINALITY(ARRAY[2, 3]))) AND (_u.pos = _u_3.pos_3 OR (_u.pos > CARDINALITY(ARRAY[4, 5, 6]) AND _u_3.pos_3 = CARDINALITY(ARRAY[4, 5, 6])))", }, ) self.validate_all( "SELECT col, pos, POSEXPLODE(ARRAY(2, 3)) FROM _u", write={ - "presto": "SELECT col, pos, IF(_u_2.pos_2 = _u_3.pos_3, _u_3.col_2) AS col_2, IF(_u_2.pos_2 = _u_3.pos_3, _u_3.pos_3) AS pos_3 FROM _u, UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(ARRAY[2, 3])))) AS _u_2(pos_2) CROSS JOIN UNNEST(ARRAY[2, 3]) WITH ORDINALITY AS _u_3(col_2, pos_3) WHERE _u_2.pos_2 = _u_3.pos_3 OR (_u_2.pos_2 > CARDINALITY(ARRAY[2, 3]) AND _u_3.pos_3 = CARDINALITY(ARRAY[2, 3]))", + "presto": "SELECT col, pos, IF(_u_2.pos_2 = _u_3.pos_3, _u_3.col_2) AS col_2, IF(_u_2.pos_2 = _u_3.pos_3, _u_3.pos_3) AS pos_3 FROM _u CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(ARRAY[2, 3])))) AS _u_2(pos_2) CROSS JOIN UNNEST(ARRAY[2, 3]) WITH ORDINALITY AS _u_3(col_2, pos_3) WHERE _u_2.pos_2 = _u_3.pos_3 OR (_u_2.pos_2 > CARDINALITY(ARRAY[2, 3]) AND _u_3.pos_3 = CARDINALITY(ARRAY[2, 3]))", }, )
`explode_to_unnest` transformation generates a query that cannot be executed with Trino

sqlglot code:

```
In [8]: import sqlglot as sg

In [9]: print(
   ...:     sg.parse_one(
   ...:         "select unnest(t.x) from (values [1, 2, 3] as t (x))", read="duckdb"
   ...:     ).sql("trino", pretty=True)
   ...: )
SELECT
  IF(_u.pos = _u_2.pos_2, _u_2.col) AS col
FROM (VALUES
  (ARRAY[1, 2, 3])) AS t(x), UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(t.x)))) AS _u(pos)
CROSS JOIN UNNEST(t.x) WITH ORDINALITY AS _u_2(col, pos_2)
WHERE
  _u.pos = _u_2.pos_2
  OR (
    _u.pos > CARDINALITY(t.x) AND _u_2.pos_2 = CARDINALITY(t.x)
  )
```

trino-cli:

```
trino:default> SELECT
            ->   IF(_u.pos = _u_2.pos_2, _u_2.col) AS col
            -> FROM (VALUES
            ->   (ARRAY[1, 2, 3])) AS t(x), UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(t.x)))) AS _u(pos)
            -> CROSS JOIN UNNEST(t.x) WITH ORDINALITY AS _u_2(col, pos_2)
            -> WHERE
            ->   _u.pos = _u_2.pos_2
            ->   OR (
            ->     _u.pos > CARDINALITY(t.x) AND _u_2.pos_2 = CARDINALITY(t.x)
            ->   );

Query 20231230_105739_28099_gh8pj failed: line 4:70: Column 't.x' cannot be resolved
```

Changing the first `,` to be `CROSS JOIN` instead fixes the issue:

```
trino:default> SELECT
            ->   IF(_u.pos = _u_2.pos_2, _u_2.col) AS col
            -> FROM (VALUES
            ->   (ARRAY[1, 2, 3])) AS t(x) CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(t.x)))) AS _u(pos)
            -> CROSS JOIN UNNEST(t.x) WITH ORDINALITY AS _u_2(col, pos_2)
            -> WHERE
            ->   _u.pos = _u_2.pos_2
            ->   OR (
            ->     _u.pos > CARDINALITY(t.x) AND _u_2.pos_2 = CARDINALITY(t.x)
            ->   );
 col
-----
   1
   2
   3
(3 rows)

Query 20231230_105747_28107_gh8pj, FINISHED, 1 node
Splits: 17 total, 17 done (100.00%)
0.08 [0 rows, 0B] [0 rows/s, 0B/s]
```
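The one-line fix in the patch above passes `join_type="CROSS"` when attaching the generated series, so the Trino output uses an explicit CROSS JOIN. A minimal sketch of the expected post-fix behavior, reusing the query from the report:

```python
import sqlglot

sql = "select unnest(t.x) from (values [1, 2, 3] as t (x))"
print(sqlglot.parse_one(sql, read="duckdb").sql("trino", pretty=True))
# After the fix, the UNNEST(SEQUENCE(...)) lateral source is attached with
# CROSS JOIN rather than a comma, so Trino can resolve the t.x reference.
```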
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery", "tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb", "tests/dialects/test_postgres.py::TestPostgres::test_unnest", "tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake", "tests/dialects/test_spark.py::TestSpark::test_explode_to_unnest" ]
[ "tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat", "tests/dialects/test_bigquery.py::TestBigQuery::test_json_object", "tests/dialects/test_bigquery.py::TestBigQuery::test_merge", "tests/dialects/test_bigquery.py::TestBigQuery::test_models", "tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names", "tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types", "tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table", "tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions", "tests/dialects/test_duckdb.py::TestDuckDB::test_array", "tests/dialects/test_duckdb.py::TestDuckDB::test_array_index", "tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or", "tests/dialects/test_duckdb.py::TestDuckDB::test_cast", "tests/dialects/test_duckdb.py::TestDuckDB::test_encode_decode", "tests/dialects/test_duckdb.py::TestDuckDB::test_isinf", "tests/dialects/test_duckdb.py::TestDuckDB::test_isnan", "tests/dialects/test_duckdb.py::TestDuckDB::test_rename_table", "tests/dialects/test_duckdb.py::TestDuckDB::test_sample", "tests/dialects/test_duckdb.py::TestDuckDB::test_time", "tests/dialects/test_duckdb.py::TestDuckDB::test_timestamps_with_units", "tests/dialects/test_postgres.py::TestPostgres::test_array_offset", "tests/dialects/test_postgres.py::TestPostgres::test_bool_or", "tests/dialects/test_postgres.py::TestPostgres::test_ddl", "tests/dialects/test_postgres.py::TestPostgres::test_operator", "tests/dialects/test_postgres.py::TestPostgres::test_postgres", "tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary", "tests/dialects/test_postgres.py::TestPostgres::test_string_concat", "tests/dialects/test_postgres.py::TestPostgres::test_variance", "tests/dialects/test_snowflake.py::TestSnowflake::test_ddl", "tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table", "tests/dialects/test_snowflake.py::TestSnowflake::test_flatten", "tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data", "tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize", "tests/dialects/test_snowflake.py::TestSnowflake::test_minus", "tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment", "tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any", "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace", "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr", "tests/dialects/test_snowflake.py::TestSnowflake::test_sample", "tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types", "tests/dialects/test_snowflake.py::TestSnowflake::test_show", "tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files", "tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures", "tests/dialects/test_snowflake.py::TestSnowflake::test_swap", "tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal", "tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps", "tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast", "tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions", "tests/dialects/test_snowflake.py::TestSnowflake::test_values", "tests/dialects/test_spark.py::TestSpark::test_bool_or", "tests/dialects/test_spark.py::TestSpark::test_current_user", "tests/dialects/test_spark.py::TestSpark::test_ddl", "tests/dialects/test_spark.py::TestSpark::test_hint", "tests/dialects/test_spark.py::TestSpark::test_iif", 
"tests/dialects/test_spark.py::TestSpark::test_insert_cte", "tests/dialects/test_spark.py::TestSpark::test_spark", "tests/dialects/test_spark.py::TestSpark::test_to_date", "tests/dialects/test_spark.py::TestSpark::test_transform_query" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2023-12-31T13:02:38Z"
mit
tobymao__sqlglot-2769
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py index 230e529f..d89ac5f9 100644 --- a/sqlglot/dialects/duckdb.py +++ b/sqlglot/dialects/duckdb.py @@ -17,6 +17,7 @@ from sqlglot.dialects.dialect import ( encode_decode_sql, format_time_lambda, inline_array_sql, + json_keyvalue_comma_sql, no_comment_column_constraint_sql, no_properties_sql, no_safe_divide_sql, @@ -349,11 +350,12 @@ class DuckDB(Dialect): exp.IntDiv: lambda self, e: self.binary(e, "//"), exp.IsInf: rename_func("ISINF"), exp.IsNan: rename_func("ISNAN"), + exp.JSONBExtract: arrow_json_extract_sql, + exp.JSONBExtractScalar: arrow_json_extract_scalar_sql, exp.JSONExtract: arrow_json_extract_sql, exp.JSONExtractScalar: arrow_json_extract_scalar_sql, exp.JSONFormat: _json_format_sql, - exp.JSONBExtract: arrow_json_extract_sql, - exp.JSONBExtractScalar: arrow_json_extract_scalar_sql, + exp.JSONKeyValue: json_keyvalue_comma_sql, exp.LogicalOr: rename_func("BOOL_OR"), exp.LogicalAnd: rename_func("BOOL_AND"), exp.MonthsBetween: lambda self, e: self.func( diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py index 6b5f71b3..f6bc55d5 100644 --- a/sqlglot/dialects/snowflake.py +++ b/sqlglot/dialects/snowflake.py @@ -14,6 +14,7 @@ from sqlglot.dialects.dialect import ( format_time_lambda, if_sql, inline_array_sql, + json_keyvalue_comma_sql, max_or_greatest, min_or_least, rename_func, @@ -445,6 +446,7 @@ class Snowflake(Dialect): FUNCTION_PARSERS = { **parser.Parser.FUNCTION_PARSERS, "DATE_PART": _parse_date_part, + "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(), } FUNCTION_PARSERS.pop("TRIM") @@ -694,6 +696,8 @@ class Snowflake(Dialect): exp.GroupConcat: rename_func("LISTAGG"), exp.If: if_sql(name="IFF", false_value="NULL"), exp.JSONExtract: lambda self, e: f"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]", + exp.JSONKeyValue: json_keyvalue_comma_sql, + exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions), exp.LogicalAnd: rename_func("BOOLAND_AGG"), exp.LogicalOr: rename_func("BOOLOR_AGG"), exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
tobymao/sqlglot
a2abbc773fb330e669c81abc115a81e1055a060f
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py index f42a3315..5de56573 100644 --- a/tests/dialects/test_snowflake.py +++ b/tests/dialects/test_snowflake.py @@ -174,6 +174,18 @@ WHERE "CAST(x AS VARCHAR)", ) + self.validate_all( + "OBJECT_CONSTRUCT_KEEP_NULL('key_1', 'one', 'key_2', NULL)", + read={ + "bigquery": "JSON_OBJECT(['key_1', 'key_2'], ['one', NULL])", + "duckdb": "JSON_OBJECT('key_1', 'one', 'key_2', NULL)", + }, + write={ + "bigquery": "JSON_OBJECT('key_1', 'one', 'key_2', NULL)", + "duckdb": "JSON_OBJECT('key_1', 'one', 'key_2', NULL)", + "snowflake": "OBJECT_CONSTRUCT_KEEP_NULL('key_1', 'one', 'key_2', NULL)", + }, + ) self.validate_all( "SELECT * FROM example TABLESAMPLE (3) SEED (82)", read={
Support OBJECT_CONSTRUCT_KEEP_NULL (Snowflake)

A [Snowflake OBJECT_CONSTRUCT_KEEP_NULL](https://docs.snowflake.com/en/sql-reference/functions/object_construct_keep_null#examples) example, where the key is NULL:

```sql
SELECT OBJECT_CONSTRUCT_KEEP_NULL('key_1', 'one', NULL, 'two') AS KEEP_NULL_2

{
  "key_1": "one"
}
```

which maps to JSON_OBJECT in DuckDB, e.g.:

```sql
SELECT JSON_OBJECT('key_1', 'one', NULL, 'two') AS KEEP_NULL_2

{"key_1":"one"}
```
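Based on the `validate_all` cases in the test patch, the mapping can be checked with a transpile in both directions; a minimal sketch using the exact literals from the new tests:

```python
import sqlglot

snow = "SELECT OBJECT_CONSTRUCT_KEEP_NULL('key_1', 'one', 'key_2', NULL)"
print(sqlglot.transpile(snow, read="snowflake", write="duckdb")[0])
# SELECT JSON_OBJECT('key_1', 'one', 'key_2', NULL)

duck = "SELECT JSON_OBJECT('key_1', 'one', 'key_2', NULL)"
print(sqlglot.transpile(duck, read="duckdb", write="snowflake")[0])
# SELECT OBJECT_CONSTRUCT_KEEP_NULL('key_1', 'one', 'key_2', NULL)
```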
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake" ]
[ "tests/dialects/test_snowflake.py::TestSnowflake::test_ddl", "tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table", "tests/dialects/test_snowflake.py::TestSnowflake::test_flatten", "tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data", "tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize", "tests/dialects/test_snowflake.py::TestSnowflake::test_minus", "tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment", "tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any", "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace", "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr", "tests/dialects/test_snowflake.py::TestSnowflake::test_sample", "tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types", "tests/dialects/test_snowflake.py::TestSnowflake::test_show", "tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files", "tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures", "tests/dialects/test_snowflake.py::TestSnowflake::test_swap", "tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal", "tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps", "tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast", "tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions", "tests/dialects/test_snowflake.py::TestSnowflake::test_values" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-01-04T15:47:47Z"
mit
tobymao__sqlglot-2770
diff --git a/sqlglot/dialects/tsql.py b/sqlglot/dialects/tsql.py index aefe625b..a555f5c4 100644 --- a/sqlglot/dialects/tsql.py +++ b/sqlglot/dialects/tsql.py @@ -730,6 +730,17 @@ class TSQL(Dialect): exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, } + def lateral_op(self, expression: exp.Lateral) -> str: + cross_apply = expression.args.get("cross_apply") + if cross_apply is True: + return "CROSS APPLY" + if cross_apply is False: + return "OUTER APPLY" + + # TODO: perhaps we can check if the parent is a Join and transpile it appropriately + self.unsupported("LATERAL clause is not supported.") + return "LATERAL" + def timefromparts_sql(self, expression: exp.TimeFromParts) -> str: nano = expression.args.get("nano") if nano is not None: diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index 56580dcf..9609c332 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -1925,7 +1925,13 @@ class Join(Expression): class Lateral(UDTF): - arg_types = {"this": True, "view": False, "outer": False, "alias": False} + arg_types = { + "this": True, + "view": False, + "outer": False, + "alias": False, + "cross_apply": False, # True -> CROSS APPLY, False -> OUTER APPLY + } class MatchRecognize(Expression): diff --git a/sqlglot/generator.py b/sqlglot/generator.py index c9adb835..98337fc6 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -1686,7 +1686,8 @@ class Generator: if not on_sql and using: on_sql = csv(*(self.sql(column) for column in using)) - this_sql = self.sql(expression, "this") + this = expression.this + this_sql = self.sql(this) if on_sql: on_sql = self.indent(on_sql, skip_first=True) @@ -1696,6 +1697,9 @@ class Generator: else: on_sql = f"{space}ON {on_sql}" elif not op_sql: + if isinstance(this, exp.Lateral) and this.args.get("cross_apply") is not None: + return f" {this_sql}" + return f", {this_sql}" op_sql = f"{op_sql} JOIN" if op_sql else "JOIN" @@ -1706,6 +1710,19 @@ class Generator: args = f"({args})" if len(args.split(",")) > 1 else args return f"{args} {arrow_sep} {self.sql(expression, 'this')}" + def lateral_op(self, expression: exp.Lateral) -> str: + cross_apply = expression.args.get("cross_apply") + + # https://www.mssqltips.com/sqlservertip/1958/sql-server-cross-apply-and-outer-apply/ + if cross_apply is True: + op = "INNER JOIN " + elif cross_apply is False: + op = "LEFT JOIN " + else: + op = "" + + return f"{op}LATERAL" + def lateral_sql(self, expression: exp.Lateral) -> str: this = self.sql(expression, "this") @@ -1719,7 +1736,7 @@ class Generator: alias = self.sql(expression, "alias") alias = f" AS {alias}" if alias else "" - return f"LATERAL {this}{alias}" + return f"{self.lateral_op(expression)} {this}{alias}" def limit_sql(self, expression: exp.Limit, top: bool = False) -> str: this = self.sql(expression, "this") diff --git a/sqlglot/parser.py b/sqlglot/parser.py index 89577cd5..40a71da5 100644 --- a/sqlglot/parser.py +++ b/sqlglot/parser.py @@ -2493,13 +2493,14 @@ class Parser(metaclass=_Parser): ) def _parse_lateral(self) -> t.Optional[exp.Lateral]: - outer_apply = self._match_pair(TokenType.OUTER, TokenType.APPLY) cross_apply = self._match_pair(TokenType.CROSS, TokenType.APPLY) + if not cross_apply and self._match_pair(TokenType.OUTER, TokenType.APPLY): + cross_apply = False - if outer_apply or cross_apply: + if cross_apply is not None: this = self._parse_select(table=True) view = None - outer = not cross_apply + outer = None elif self._match(TokenType.LATERAL): this = self._parse_select(table=True) view = 
self._match(TokenType.VIEW) @@ -2532,7 +2533,14 @@ class Parser(metaclass=_Parser): else: table_alias = self._parse_table_alias() - return self.expression(exp.Lateral, this=this, view=view, outer=outer, alias=table_alias) + return self.expression( + exp.Lateral, + this=this, + view=view, + outer=outer, + alias=table_alias, + cross_apply=cross_apply, + ) def _parse_join_parts( self, @@ -2566,9 +2574,6 @@ class Parser(metaclass=_Parser): if not skip_join_token and not join and not outer_apply and not cross_apply: return None - if outer_apply: - side = Token(TokenType.LEFT, "LEFT") - kwargs: t.Dict[str, t.Any] = {"this": self._parse_table(parse_bracket=parse_bracket)} if method:
tobymao/sqlglot
f65ed4d86517edd266ab9daf75570ea673a447af
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py index e9c0a84d..fde88d70 100644 --- a/tests/dialects/test_tsql.py +++ b/tests/dialects/test_tsql.py @@ -1247,39 +1247,45 @@ WHERE self.validate_all( "SELECT x.a, x.b, t.v, t.y FROM x CROSS APPLY (SELECT v, y FROM t) t(v, y)", write={ - "spark": "SELECT x.a, x.b, t.v, t.y FROM x, LATERAL (SELECT v, y FROM t) AS t(v, y)", + "spark": "SELECT x.a, x.b, t.v, t.y FROM x INNER JOIN LATERAL (SELECT v, y FROM t) AS t(v, y)", + "tsql": "SELECT x.a, x.b, t.v, t.y FROM x CROSS APPLY (SELECT v, y FROM t) AS t(v, y)", }, ) self.validate_all( "SELECT x.a, x.b, t.v, t.y FROM x OUTER APPLY (SELECT v, y FROM t) t(v, y)", write={ "spark": "SELECT x.a, x.b, t.v, t.y FROM x LEFT JOIN LATERAL (SELECT v, y FROM t) AS t(v, y)", + "tsql": "SELECT x.a, x.b, t.v, t.y FROM x OUTER APPLY (SELECT v, y FROM t) AS t(v, y)", }, ) self.validate_all( "SELECT x.a, x.b, t.v, t.y, s.v, s.y FROM x OUTER APPLY (SELECT v, y FROM t) t(v, y) OUTER APPLY (SELECT v, y FROM t) s(v, y) LEFT JOIN z ON z.id = s.id", write={ "spark": "SELECT x.a, x.b, t.v, t.y, s.v, s.y FROM x LEFT JOIN LATERAL (SELECT v, y FROM t) AS t(v, y) LEFT JOIN LATERAL (SELECT v, y FROM t) AS s(v, y) LEFT JOIN z ON z.id = s.id", + "tsql": "SELECT x.a, x.b, t.v, t.y, s.v, s.y FROM x OUTER APPLY (SELECT v, y FROM t) AS t(v, y) OUTER APPLY (SELECT v, y FROM t) AS s(v, y) LEFT JOIN z ON z.id = s.id", }, ) def test_lateral_table_valued_function(self): self.validate_all( - "SELECT t.x, y.z FROM x CROSS APPLY tvfTest(t.x)y(z)", + "SELECT t.x, y.z FROM x CROSS APPLY tvfTest(t.x) y(z)", write={ - "spark": "SELECT t.x, y.z FROM x, LATERAL TVFTEST(t.x) AS y(z)", + "spark": "SELECT t.x, y.z FROM x INNER JOIN LATERAL TVFTEST(t.x) AS y(z)", + "tsql": "SELECT t.x, y.z FROM x CROSS APPLY TVFTEST(t.x) AS y(z)", }, ) self.validate_all( "SELECT t.x, y.z FROM x OUTER APPLY tvfTest(t.x)y(z)", write={ "spark": "SELECT t.x, y.z FROM x LEFT JOIN LATERAL TVFTEST(t.x) AS y(z)", + "tsql": "SELECT t.x, y.z FROM x OUTER APPLY TVFTEST(t.x) AS y(z)", }, ) self.validate_all( "SELECT t.x, y.z FROM x OUTER APPLY a.b.tvfTest(t.x)y(z)", write={ "spark": "SELECT t.x, y.z FROM x LEFT JOIN LATERAL a.b.TVFTEST(t.x) AS y(z)", + "tsql": "SELECT t.x, y.z FROM x OUTER APPLY a.b.TVFTEST(t.x) AS y(z)", }, )
CROSS APPLY is changed to LATERAL (tsql)

Code to reproduce:

```
import sqlglot

input_sql = """
SELECT
    sd1.id,
    sd1.item_id,
    sd1.ds,
FROM sqlmesh_example.seed_model AS sd1
CROSS APPLY (
    SELECT TOP 1
        sd2.id
    FROM sqlmesh_example.seed_model AS sd2
    WHERE sd1.id = sd2.id
)
"""

sqlglot.transpile(input_sql, read="tsql")
```

Code output:

`'SELECT sd1.id, sd1.item_id, sd1.ds FROM sqlmesh_example.seed_model AS sd1, LATERAL (SELECT TOP 1 sd2.id FROM sqlmesh_example.seed_model AS sd2 WHERE sd1.id = sd2.id)'`

Error when executing the query, coming from the inner SELECT inside the parentheses:

`SQL Error [156] [S0001]: Incorrect syntax near the keyword 'SELECT'.`

While LATERAL is reserved in T-SQL, it has no functional use. I would expect CROSS APPLY to stay CROSS APPLY when transpiling back to T-SQL, and inner LATERAL joins from other dialects to be converted to CROSS APPLY when transpiling to T-SQL.
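The expected post-fix behavior, taken from the updated test cases in the test patch above (a minimal sketch):

```python
import sqlglot

sql = "SELECT t.x, y.z FROM x CROSS APPLY tvfTest(t.x) y(z)"

# T-SQL round trip keeps the APPLY form:
print(sqlglot.transpile(sql, read="tsql", write="tsql")[0])
# SELECT t.x, y.z FROM x CROSS APPLY TVFTEST(t.x) AS y(z)

# Dialects with LATERAL get an explicit join instead of a comma:
print(sqlglot.transpile(sql, read="tsql", write="spark")[0])
# SELECT t.x, y.z FROM x INNER JOIN LATERAL TVFTEST(t.x) AS y(z)
```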
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery", "tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function" ]
[ "tests/dialects/test_tsql.py::TestTSQL::test__types_ints", "tests/dialects/test_tsql.py::TestTSQL::test_add_date", "tests/dialects/test_tsql.py::TestTSQL::test_charindex", "tests/dialects/test_tsql.py::TestTSQL::test_commit", "tests/dialects/test_tsql.py::TestTSQL::test_convert_date_format", "tests/dialects/test_tsql.py::TestTSQL::test_current_user", "tests/dialects/test_tsql.py::TestTSQL::test_date_diff", "tests/dialects/test_tsql.py::TestTSQL::test_datefromparts", "tests/dialects/test_tsql.py::TestTSQL::test_datename", "tests/dialects/test_tsql.py::TestTSQL::test_datepart", "tests/dialects/test_tsql.py::TestTSQL::test_ddl", "tests/dialects/test_tsql.py::TestTSQL::test_eomonth", "tests/dialects/test_tsql.py::TestTSQL::test_format", "tests/dialects/test_tsql.py::TestTSQL::test_fullproc", "tests/dialects/test_tsql.py::TestTSQL::test_hints", "tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes", "tests/dialects/test_tsql.py::TestTSQL::test_iif", "tests/dialects/test_tsql.py::TestTSQL::test_insert_cte", "tests/dialects/test_tsql.py::TestTSQL::test_isnull", "tests/dialects/test_tsql.py::TestTSQL::test_jsonvalue", "tests/dialects/test_tsql.py::TestTSQL::test_len", "tests/dialects/test_tsql.py::TestTSQL::test_openjson", "tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords", "tests/dialects/test_tsql.py::TestTSQL::test_qualify_derived_table_outputs", "tests/dialects/test_tsql.py::TestTSQL::test_replicate", "tests/dialects/test_tsql.py::TestTSQL::test_rollback", "tests/dialects/test_tsql.py::TestTSQL::test_set", "tests/dialects/test_tsql.py::TestTSQL::test_string", "tests/dialects/test_tsql.py::TestTSQL::test_system_time", "tests/dialects/test_tsql.py::TestTSQL::test_temp_table", "tests/dialects/test_tsql.py::TestTSQL::test_temporal_table", "tests/dialects/test_tsql.py::TestTSQL::test_top", "tests/dialects/test_tsql.py::TestTSQL::test_transaction", "tests/dialects/test_tsql.py::TestTSQL::test_tsql", "tests/dialects/test_tsql.py::TestTSQL::test_types", "tests/dialects/test_tsql.py::TestTSQL::test_types_bin", "tests/dialects/test_tsql.py::TestTSQL::test_types_date", "tests/dialects/test_tsql.py::TestTSQL::test_types_decimals", "tests/dialects/test_tsql.py::TestTSQL::test_types_string", "tests/dialects/test_tsql.py::TestTSQL::test_udf" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-01-04T20:47:38Z"
mit
tobymao__sqlglot-2795
diff --git a/sqlglot/dialects/clickhouse.py b/sqlglot/dialects/clickhouse.py index eb3be0a9..795a04e5 100644 --- a/sqlglot/dialects/clickhouse.py +++ b/sqlglot/dialects/clickhouse.py @@ -23,16 +23,25 @@ def _lower_func(sql: str) -> str: return sql[:index].lower() + sql[index:] -def _quantile_sql(self, e): +def _quantile_sql(self: ClickHouse.Generator, e: exp.Quantile) -> str: quantile = e.args["quantile"] args = f"({self.sql(e, 'this')})" + if isinstance(quantile, exp.Array): func = self.func("quantiles", *quantile) else: func = self.func("quantile", quantile) + return func + args +def _parse_count_if(args: t.List) -> exp.CountIf | exp.CombinedAggFunc: + if len(args) == 1: + return exp.CountIf(this=seq_get(args, 0)) + + return exp.CombinedAggFunc(this="countIf", expressions=args, parts=("count", "If")) + + class ClickHouse(Dialect): NORMALIZE_FUNCTIONS: bool | str = False NULL_ORDERING = "nulls_are_last" @@ -92,6 +101,7 @@ class ClickHouse(Dialect): FUNCTIONS = { **parser.Parser.FUNCTIONS, "ANY": exp.AnyValue.from_arg_list, + "COUNTIF": _parse_count_if, "DATE_ADD": lambda args: exp.DateAdd( this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) ), @@ -542,6 +552,7 @@ class ClickHouse(Dialect): exp.ArgMin: arg_max_or_min_no_count("argMin"), exp.Array: inline_array_sql, exp.CastToStrType: rename_func("CAST"), + exp.CountIf: rename_func("countIf"), exp.CurrentDate: lambda self, e: self.func("CURRENT_DATE"), exp.DateAdd: date_delta_sql("DATE_ADD"), exp.DateDiff: date_delta_sql("DATE_DIFF"),
tobymao/sqlglot
a2499f591eeb7538db86abd8cc9341c8d91e325d
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py index 6844239e..71f2c196 100644 --- a/tests/dialects/test_bigquery.py +++ b/tests/dialects/test_bigquery.py @@ -292,8 +292,13 @@ class TestBigQuery(Validator): ) self.validate_all( "SELECT COUNTIF(x)", + read={ + "clickhouse": "SELECT countIf(x)", + "duckdb": "SELECT COUNT_IF(x)", + }, write={ "bigquery": "SELECT COUNTIF(x)", + "clickhouse": "SELECT countIf(x)", "duckdb": "SELECT COUNT_IF(x)", }, ) diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py index fa40264c..2dfcad5c 100644 --- a/tests/dialects/test_clickhouse.py +++ b/tests/dialects/test_clickhouse.py @@ -26,6 +26,7 @@ class TestClickhouse(Validator): self.assertEqual(expr.sql(dialect="clickhouse"), "COUNT(x)") self.assertIsNone(expr._meta) + self.validate_identity("countIf(x, y)") self.validate_identity("x = y") self.validate_identity("x <> y") self.validate_identity("SELECT * FROM (SELECT a FROM b SAMPLE 0.01)")
two-argument version of ClickHouse's `countIf` function incorrectly fails to parse

This used to work:

```
In [3]: import sqlglot as sg

In [4]: sg.__version__
Out[4]: '20.7.1'

In [5]: sg.parse_one("select countIf(x, y)", read="clickhouse")
...
ParseError: The number of provided arguments (2) is greater than the maximum number of supported arguments (1). Line 1, Col: 20.
  select countIf(x, y)
```

ClickHouse CLI:

```
localhost :) select countIf(1, false), countIf(1, true);

SELECT
    countIf(1, false),
    countIf(1, true)

Query id: cd5dea26-fae2-4bdc-a18b-edbf5b910fda

┌─countIf(1, false)─┬─countIf(1, true)─┐
│                 0 │                1 │
└───────────────────┴──────────────────┘
```

Unfortunately the ClickHouse docs on `countIf`'s signature are nonexistent, with only a single mention of the function in the `If` combinators section here: https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators#-if
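Per the new tests (`validate_identity("countIf(x, y)")` and the cross-dialect COUNTIF case), the fixed behavior can be sketched as follows:

```python
import sqlglot as sg

# After the fix, the two-argument form parses and round-trips unchanged:
print(sg.parse_one("select countIf(x, y)", read="clickhouse").sql(dialect="clickhouse"))
# SELECT countIf(x, y)

# The one-argument form still maps to the portable CountIf expression:
print(sg.transpile("SELECT countIf(x)", read="clickhouse", write="duckdb")[0])
# SELECT COUNT_IF(x)
```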
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery", "tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse" ]
[ "tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat", "tests/dialects/test_bigquery.py::TestBigQuery::test_models", "tests/dialects/test_bigquery.py::TestBigQuery::test_merge", "tests/dialects/test_bigquery.py::TestBigQuery::test_json_object", "tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types", "tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions", "tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table", "tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names", "tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types", "tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization", "tests/dialects/test_clickhouse.py::TestClickhouse::test_cte", "tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary", "tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
"2024-01-08T17:00:53Z"
mit
tobymao__sqlglot-2800
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py index ad14e6ee..454df94c 100644 --- a/sqlglot/dialects/snowflake.py +++ b/sqlglot/dialects/snowflake.py @@ -328,6 +328,9 @@ def _parse_colon_get_path( if not self._match(TokenType.COLON): break + if self._match_set(self.RANGE_PARSERS): + this = self.RANGE_PARSERS[self._prev.token_type](self, this) or this + return this
tobymao/sqlglot
18e07d3353c1e11cc5b3ba2025e4440f48c2be02
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py index 602bc630..39963b28 100644 --- a/tests/dialects/test_snowflake.py +++ b/tests/dialects/test_snowflake.py @@ -78,6 +78,14 @@ WHERE self.validate_identity( "SELECT a FROM test PIVOT(SUM(x) FOR y IN ('z', 'q')) AS x TABLESAMPLE (0.1)" ) + self.validate_identity( + """SELECT PARSE_JSON('{"x": "hello"}'):x LIKE 'hello'""", + """SELECT GET_PATH(PARSE_JSON('{"x": "hello"}'), 'x') LIKE 'hello'""", + ) + self.validate_identity( + """SELECT data:x LIKE 'hello' FROM some_table""", + """SELECT GET_PATH(data, 'x') LIKE 'hello' FROM some_table""", + ) self.validate_identity( "SELECT SUM({ fn CONVERT(123, SQL_DOUBLE) })", "SELECT SUM(CAST(123 AS DOUBLE))",
ParseError when using LIKE/ILIKE on an element in an object in Snowflake

I'm getting `ParseError: Invalid expression / Unexpected token` when using `LIKE` or `ILIKE` on an element within an object in Snowflake. Example:

```
import sqlglot

sqlglot.parse("""
select parse_json('{"x": "hello"}'):x like 'hello'
""", read="snowflake")

sqlglot.parse("""
select data:x like 'hello' from some_table
""", read="snowflake")
```

Both of these cause the parsing error, but both are valid Snowflake statements.
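With the fix, the `:`-path expression is rewritten to `GET_PATH` and the trailing `LIKE`/`ILIKE` is parsed as a range operator on it. The new test cases translate to this minimal sketch:

```python
import sqlglot

sql = "select data:x like 'hello' from some_table"
print(sqlglot.transpile(sql, read="snowflake", write="snowflake")[0])
# SELECT GET_PATH(data, 'x') LIKE 'hello' FROM some_table
```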
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake" ]
[ "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr", "tests/dialects/test_snowflake.py::TestSnowflake::test_ddl", "tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast", "tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures", "tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data", "tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any", "tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment", "tests/dialects/test_snowflake.py::TestSnowflake::test_values", "tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files", "tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps", "tests/dialects/test_snowflake.py::TestSnowflake::test_minus", "tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table", "tests/dialects/test_snowflake.py::TestSnowflake::test_show", "tests/dialects/test_snowflake.py::TestSnowflake::test_flatten", "tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize", "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace", "tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal", "tests/dialects/test_snowflake.py::TestSnowflake::test_swap", "tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types", "tests/dialects/test_snowflake.py::TestSnowflake::test_sample", "tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2024-01-09T16:42:25Z"
mit
tobymao__sqlglot-2825
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py index f88f3eb1..6ca8e9de 100644 --- a/sqlglot/dialects/snowflake.py +++ b/sqlglot/dialects/snowflake.py @@ -663,7 +663,9 @@ class Snowflake(Dialect): "MINUS": TokenType.EXCEPT, "NCHAR VARYING": TokenType.VARCHAR, "PUT": TokenType.COMMAND, + "REMOVE": TokenType.COMMAND, "RENAME": TokenType.REPLACE, + "RM": TokenType.COMMAND, "SAMPLE": TokenType.TABLE_SAMPLE, "SQL_DOUBLE": TokenType.DOUBLE, "SQL_VARCHAR": TokenType.VARCHAR,
tobymao/sqlglot
7bce2f6abe79dfd8064c625294d94364042207c5
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py index 1f365bea..5dd81cdc 100644 --- a/tests/dialects/test_snowflake.py +++ b/tests/dialects/test_snowflake.py @@ -39,6 +39,8 @@ WHERE )""", ) + self.validate_identity("RM @parquet_stage") + self.validate_identity("REMOVE @parquet_stage") self.validate_identity("SELECT TIMESTAMP_FROM_PARTS(d, t)") self.validate_identity("SELECT GET_PATH(v, 'attr[0].name') FROM vartab") self.validate_identity("SELECT TO_ARRAY(CAST(x AS ARRAY))")
sqlglot 20.8.0 incorrectly transpiles or fails to parse Snowflake REMOVE / RM syntax

Snowflake RM / REMOVE syntax is incorrectly transpiled or causes errors for the cases from the documentation: `"RM @parquet_stage;" -> "RM AS $parquet_stage;"` is rejected by Snowflake at runtime; the other cases cause ParseErrors.

## Reference

https://docs.snowflake.com/en/sql-reference/sql/remove

## Code

```
examples = [
    "RM @parquet_stage;",
    "REMOVE @parquet_stage;",
    "RM @%mytable/myobject;",
    "RM @%mytable/myobject/;",
    "RM @~ pattern='.*jun.*';",
    "REMOVE @%orders;",
    "REMOVE @mystage/path1/subpath2;",
]

import sqlglot
import sqlglot.errors

print(sqlglot.__version__)

for i, s in enumerate(examples):
    try:
        t = sqlglot.transpile(
            s.strip(), read="snowflake", write="snowflake", pretty=True
        )
        print(i, s, "->", t)
    except sqlglot.errors.ParseError as e:
        print(i, "error", e)
```

## Output

```
20.8.0
0 RM @parquet_stage; -> ['RM AS $parquet_stage']
1 REMOVE @parquet_stage; -> ['REMOVE AS $parquet_stage']
2 error Required keyword: 'this' missing for <class 'sqlglot.expressions.Parameter'>. Line 1, Col: 5.
  RM @%mytable/myobject;
3 error Required keyword: 'this' missing for <class 'sqlglot.expressions.Parameter'>. Line 1, Col: 5.
  RM @%mytable/myobject/;
4 error Required keyword: 'this' missing for <class 'sqlglot.expressions.Parameter'>. Line 1, Col: 5.
  RM @~ pattern='.*jun.*';
5 error Required keyword: 'this' missing for <class 'sqlglot.expressions.Parameter'>. Line 1, Col: 9.
  REMOVE @%orders;
6 error Invalid expression / Unexpected token. Line 1, Col: 16.
  REMOVE @mystage/path1/subpath2;
```
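The fix tokenizes `RM` and `REMOVE` as commands, so at least the plain stage-reference forms covered by the new tests round-trip verbatim. A minimal sketch; note that the `@%table` and `pattern=` variants from the report are not exercised by the test patch:

```python
import sqlglot

for sql in ("RM @parquet_stage", "REMOVE @parquet_stage"):
    print(sqlglot.transpile(sql, read="snowflake", write="snowflake")[0])
# RM @parquet_stage
# REMOVE @parquet_stage
```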
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake" ]
[ "tests/dialects/test_snowflake.py::TestSnowflake::test_ddl", "tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table", "tests/dialects/test_snowflake.py::TestSnowflake::test_flatten", "tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data", "tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize", "tests/dialects/test_snowflake.py::TestSnowflake::test_minus", "tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment", "tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any", "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace", "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr", "tests/dialects/test_snowflake.py::TestSnowflake::test_sample", "tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types", "tests/dialects/test_snowflake.py::TestSnowflake::test_show", "tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files", "tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures", "tests/dialects/test_snowflake.py::TestSnowflake::test_swap", "tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal", "tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps", "tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast", "tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions", "tests/dialects/test_snowflake.py::TestSnowflake::test_values" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
"2024-01-13T00:42:31Z"
mit
tobymao__sqlglot-2857
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index ddad8f83..2286d682 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -1667,6 +1667,7 @@ class Index(Expression): "unique": False, "primary": False, "amp": False, # teradata + "include": False, "partition_by": False, # teradata "where": False, # postgres partial indexes } diff --git a/sqlglot/generator.py b/sqlglot/generator.py index 977185ff..79e4b898 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -1111,7 +1111,10 @@ class Generator: partition_by = self.expressions(expression, key="partition_by", flat=True) partition_by = f" PARTITION BY {partition_by}" if partition_by else "" where = self.sql(expression, "where") - return f"{unique}{primary}{amp}{index}{name}{table}{using}{columns}{partition_by}{where}" + include = self.expressions(expression, key="include", flat=True) + if include: + include = f" INCLUDE ({include})" + return f"{unique}{primary}{amp}{index}{name}{table}{using}{columns}{include}{partition_by}{where}" def identifier_sql(self, expression: exp.Identifier) -> str: text = expression.name diff --git a/sqlglot/parser.py b/sqlglot/parser.py index 5fb40b95..54d09715 100644 --- a/sqlglot/parser.py +++ b/sqlglot/parser.py @@ -2681,6 +2681,8 @@ class Parser(metaclass=_Parser): else: columns = None + include = self._parse_wrapped_id_vars() if self._match_text_seq("INCLUDE") else None + return self.expression( exp.Index, this=index, @@ -2690,6 +2692,7 @@ class Parser(metaclass=_Parser): unique=unique, primary=primary, amp=amp, + include=include, partition_by=self._parse_partition_by(), where=self._parse_where(), )
tobymao/sqlglot
bf03a45d8df9abd63b8102e431c13ca0eb0b0fb0
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py index ed25315b..1c2a2286 100644 --- a/tests/dialects/test_postgres.py +++ b/tests/dialects/test_postgres.py @@ -582,6 +582,7 @@ class TestPostgres(Validator): cdef.args["kind"].assert_is(exp.DataType) self.assertEqual(expr.sql(dialect="postgres"), "CREATE TABLE t (x INTERVAL DAY)") + self.validate_identity("CREATE INDEX et_vid_idx ON et(vid) INCLUDE (fid)") self.validate_identity("CREATE INDEX idx_x ON x USING BTREE(x, y) WHERE (NOT y IS NULL)") self.validate_identity("CREATE TABLE test (elems JSONB[])") self.validate_identity("CREATE TABLE public.y (x TSTZRANGE NOT NULL)")
INCLUDE keyword for indexes in PostgreSQL As far as I understand, the INCLUDE keyword for indexes isn't supported. Not a big problem, but it would be better if it worked. ```text Python 3.8.10 (default, Nov 22 2023, 10:22:35) [GCC 9.4.0] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import sqlglot >>> sqlglot.parse_one("CREATE INDEX et_vid_idx ON et (vid) INCLUDE (fid)", read="postgres") Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/__init__.py", line 125, in parse_one result = dialect.parse(sql, **opts) File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/dialects/dialect.py", line 442, in parse return self.parser(**opts).parse(self.tokenize(sql), sql) File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 1034, in parse return self._parse( File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 1103, in _parse self.raise_error("Invalid expression / Unexpected token") File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 1144, in raise_error raise error sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 1, Col: 43. CREATE INDEX et_vid_idx ON et (vid) INCLUDE (fid) >>> sqlglot.__version__ '20.9.0' ``` Without `INCLUDE` everything works: ```text >>> sqlglot.parse_one("CREATE INDEX et_vid_idx ON et (vid)", read="postgres") Create( this=Index( this=Identifier(this=et_vid_idx, quoted=False), table=Table( this=Identifier(this=et, quoted=False)), columns=[ Ordered( this=Column( this=Identifier(this=vid, quoted=False)), nulls_first=False)]), kind=INDEX, exists=False) ```
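A minimal sketch of the round-trip this record's test patch exercises, assuming a sqlglot build that contains the fix above:

```python
import sqlglot

# Previously this raised a ParseError at INCLUDE; with the patch, the clause
# is stored on the Index expression and regenerated verbatim.
ast = sqlglot.parse_one("CREATE INDEX et_vid_idx ON et(vid) INCLUDE (fid)", read="postgres")
print(ast.sql(dialect="postgres"))  # CREATE INDEX et_vid_idx ON et(vid) INCLUDE (fid)
```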
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_postgres.py::TestPostgres::test_ddl" ]
[ "tests/dialects/test_postgres.py::TestPostgres::test_array_offset", "tests/dialects/test_postgres.py::TestPostgres::test_bool_or", "tests/dialects/test_postgres.py::TestPostgres::test_operator", "tests/dialects/test_postgres.py::TestPostgres::test_postgres", "tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary", "tests/dialects/test_postgres.py::TestPostgres::test_string_concat", "tests/dialects/test_postgres.py::TestPostgres::test_unnest", "tests/dialects/test_postgres.py::TestPostgres::test_variance" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-01-19T03:56:02Z"
mit
tobymao__sqlglot-2861
diff --git a/sqlglot/dataframe/sql/functions.py b/sqlglot/dataframe/sql/functions.py index 141a302e..a388cb4b 100644 --- a/sqlglot/dataframe/sql/functions.py +++ b/sqlglot/dataframe/sql/functions.py @@ -661,7 +661,7 @@ def from_utc_timestamp(timestamp: ColumnOrName, tz: ColumnOrName) -> Column: def to_utc_timestamp(timestamp: ColumnOrName, tz: ColumnOrName) -> Column: tz_column = tz if isinstance(tz, Column) else lit(tz) - return Column.invoke_anonymous_function(timestamp, "TO_UTC_TIMESTAMP", tz_column) + return Column.invoke_expression_over_column(timestamp, expression.FromTimeZone, zone=tz_column) def timestamp_seconds(col: ColumnOrName) -> Column: diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py index 0151e6c8..83ae94ed 100644 --- a/sqlglot/dialects/bigquery.py +++ b/sqlglot/dialects/bigquery.py @@ -560,6 +560,9 @@ class BigQuery(Dialect): exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"), exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"), exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")), + exp.FromTimeZone: lambda self, e: self.func( + "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'" + ), exp.GenerateSeries: rename_func("GENERATE_ARRAY"), exp.GetPath: path_to_jsonpath(), exp.GroupConcat: rename_func("STRING_AGG"), diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py index 9b421e7f..6cc6030c 100644 --- a/sqlglot/dialects/presto.py +++ b/sqlglot/dialects/presto.py @@ -356,6 +356,7 @@ class Presto(Dialect): exp.Encode: lambda self, e: encode_decode_sql(self, e, "TO_UTF8"), exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'", exp.First: _first_last_sql, + exp.FromTimeZone: lambda self, e: f"WITH_TIMEZONE({self.sql(e, 'this')}, {self.sql(e, 'zone')}) AT TIME ZONE 'UTC'", exp.GetPath: path_to_jsonpath(), exp.Group: transforms.preprocess([transforms.unalias_group]), exp.GroupConcat: lambda self, e: self.func( diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py index 3cf7f7d1..43a439da 100644 --- a/sqlglot/dialects/snowflake.py +++ b/sqlglot/dialects/snowflake.py @@ -687,6 +687,9 @@ class Snowflake(Dialect): exp.DayOfYear: rename_func("DAYOFYEAR"), exp.Explode: rename_func("FLATTEN"), exp.Extract: rename_func("DATE_PART"), + exp.FromTimeZone: lambda self, e: self.func( + "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this + ), exp.GenerateSeries: lambda self, e: self.func( "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step") ), diff --git a/sqlglot/dialects/spark2.py b/sqlglot/dialects/spark2.py index e27ba185..7ecb06f0 100644 --- a/sqlglot/dialects/spark2.py +++ b/sqlglot/dialects/spark2.py @@ -133,6 +133,14 @@ class Spark2(Hive): if len(args) == 1 else format_time_lambda(exp.StrToTime, "spark")(args), "TO_UNIX_TIMESTAMP": exp.StrToUnix.from_arg_list, + "TO_UTC_TIMESTAMP": lambda args: exp.FromTimeZone( + this=exp.cast_unless( + seq_get(args, 0) or exp.Var(this=""), + exp.DataType.build("timestamp"), + exp.DataType.build("timestamp"), + ), + zone=seq_get(args, 1), + ), "TRUNC": lambda args: exp.DateTrunc(unit=seq_get(args, 1), this=seq_get(args, 0)), "WEEKOFYEAR": lambda args: exp.WeekOfYear(this=exp.TsOrDsToDate(this=seq_get(args, 0))), } @@ -188,6 +196,7 @@ class Spark2(Hive): exp.DayOfYear: rename_func("DAYOFYEAR"), exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}", exp.From: transforms.preprocess([_unalias_pivot]), + exp.FromTimeZone: lambda self, e: f"TO_UTC_TIMESTAMP({self.sql(e, 'this')}, 
{self.sql(e, 'zone')})", exp.LogicalAnd: rename_func("BOOL_AND"), exp.LogicalOr: rename_func("BOOL_OR"), exp.Map: _map_sql, diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index 597a37fe..7f68015e 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -4171,6 +4171,10 @@ class AtTimeZone(Expression): arg_types = {"this": True, "zone": True} +class FromTimeZone(Expression): + arg_types = {"this": True, "zone": True} + + class Between(Predicate): arg_types = {"this": True, "low": True, "high": True} diff --git a/sqlglot/generator.py b/sqlglot/generator.py index bb26b385..704e9eec 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -2555,6 +2555,11 @@ class Generator: zone = self.sql(expression, "zone") return f"{this} AT TIME ZONE {zone}" + def fromtimezone_sql(self, expression: exp.FromTimeZone) -> str: + this = self.sql(expression, "this") + zone = self.sql(expression, "zone") + return f"{this} AT TIME ZONE {zone} AT TIME ZONE 'UTC'" + def add_sql(self, expression: exp.Add) -> str: return self.binary(expression, "+")
tobymao/sqlglot
90ffff83266b5714b1371a576d9484dfbe4be155
diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py index 56a573a6..60440373 100644 --- a/tests/dialects/test_spark.py +++ b/tests/dialects/test_spark.py @@ -227,7 +227,6 @@ TBLPROPERTIES ( ) def test_spark(self): - self.validate_identity("FROM_UTC_TIMESTAMP(CAST(x AS TIMESTAMP), 'utc')") expr = parse_one("any_value(col, true)", read="spark") self.assertIsInstance(expr.args.get("ignore_nulls"), exp.Boolean) self.assertEqual(expr.sql(dialect="spark"), "ANY_VALUE(col, TRUE)") @@ -276,6 +275,25 @@ TBLPROPERTIES ( "SELECT STR_TO_MAP('a:1,b:2,c:3', ',', ':')", ) + self.validate_all( + "SELECT TO_UTC_TIMESTAMP('2016-08-31', 'Asia/Seoul')", + write={ + "bigquery": "SELECT DATETIME(TIMESTAMP(CAST('2016-08-31' AS DATETIME), 'Asia/Seoul'), 'UTC')", + "duckdb": "SELECT CAST('2016-08-31' AS TIMESTAMP) AT TIME ZONE 'Asia/Seoul' AT TIME ZONE 'UTC'", + "postgres": "SELECT CAST('2016-08-31' AS TIMESTAMP) AT TIME ZONE 'Asia/Seoul' AT TIME ZONE 'UTC'", + "presto": "SELECT WITH_TIMEZONE(CAST('2016-08-31' AS TIMESTAMP), 'Asia/Seoul') AT TIME ZONE 'UTC'", + "redshift": "SELECT CAST('2016-08-31' AS TIMESTAMP) AT TIME ZONE 'Asia/Seoul' AT TIME ZONE 'UTC'", + "snowflake": "SELECT CONVERT_TIMEZONE('Asia/Seoul', 'UTC', CAST('2016-08-31' AS TIMESTAMPNTZ))", + "spark": "SELECT TO_UTC_TIMESTAMP(CAST('2016-08-31' AS TIMESTAMP), 'Asia/Seoul')", + }, + ) + self.validate_all( + "SELECT FROM_UTC_TIMESTAMP('2016-08-31', 'Asia/Seoul')", + write={ + "presto": "SELECT CAST('2016-08-31' AS TIMESTAMP) AT TIME ZONE 'Asia/Seoul'", + "spark": "SELECT FROM_UTC_TIMESTAMP(CAST('2016-08-31' AS TIMESTAMP), 'Asia/Seoul')", + }, + ) self.validate_all( "foo.bar", read={
Support for timezone conversion functions between Spark and Trino/Presto dialects **Is your feature request related to a problem? Please describe.** The Spark functions [`from_utc_timestamp`](https://spark.apache.org/docs/3.1.3/api/python/reference/api/pyspark.sql.functions.from_utc_timestamp.html) and [`to_utc_timestamp`](https://spark.apache.org/docs/3.1.2/api/python/reference/api/pyspark.sql.functions.to_utc_timestamp.html) are passed through unchanged when converting to Presto/Trino, but those functions don't exist in these dialects. I believe they should be converted to [`at_timezone`](https://trino.io/docs/current/functions/datetime.html#at_timezone) and [`with_timezone`](https://trino.io/docs/current/functions/datetime.html#with_timezone) respectively. **Describe the solution you'd like** I'd like support for `at_timezone` and `with_timezone` to be added to the Presto/Trino dialects so that they transpile `from_utc_timestamp` and `to_utc_timestamp` from Spark, and vice versa. **Describe alternatives you've considered** I don't have any alternative if I want to use sqlglot.
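A sketch of the requested transpilation, assuming the FromTimeZone support added by the patch above; the expected output strings are taken from this record's test patch:

```python
import sqlglot

# TO_UTC_TIMESTAMP becomes Presto's WITH_TIMEZONE(...) AT TIME ZONE 'UTC'.
print(sqlglot.transpile(
    "SELECT TO_UTC_TIMESTAMP('2016-08-31', 'Asia/Seoul')", read="spark", write="presto"
)[0])
# SELECT WITH_TIMEZONE(CAST('2016-08-31' AS TIMESTAMP), 'Asia/Seoul') AT TIME ZONE 'UTC'

# FROM_UTC_TIMESTAMP becomes a plain AT TIME ZONE conversion.
print(sqlglot.transpile(
    "SELECT FROM_UTC_TIMESTAMP('2016-08-31', 'Asia/Seoul')", read="spark", write="presto"
)[0])
# SELECT CAST('2016-08-31' AS TIMESTAMP) AT TIME ZONE 'Asia/Seoul'
```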
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_spark.py::TestSpark::test_spark" ]
[ "tests/dialects/test_spark.py::TestSpark::test_bool_or", "tests/dialects/test_spark.py::TestSpark::test_current_user", "tests/dialects/test_spark.py::TestSpark::test_ddl", "tests/dialects/test_spark.py::TestSpark::test_explode_to_unnest", "tests/dialects/test_spark.py::TestSpark::test_hint", "tests/dialects/test_spark.py::TestSpark::test_iif", "tests/dialects/test_spark.py::TestSpark::test_insert_cte", "tests/dialects/test_spark.py::TestSpark::test_to_date", "tests/dialects/test_spark.py::TestSpark::test_transform_query" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-01-19T17:46:44Z"
mit
tobymao__sqlglot-2873
diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py index 1ca0a781..6ee3bdec 100644 --- a/sqlglot/dialects/postgres.py +++ b/sqlglot/dialects/postgres.py @@ -282,6 +282,11 @@ class Postgres(Dialect): VAR_SINGLE_TOKENS = {"$"} class Parser(parser.Parser): + PROPERTY_PARSERS = { + **parser.Parser.PROPERTY_PARSERS, + "SET": lambda self: self.expression(exp.SetConfigProperty, this=self._parse_set()), + } + FUNCTIONS = { **parser.Parser.FUNCTIONS, "DATE_TRUNC": parse_timestamp_trunc, diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index 0e6608e2..98114fbd 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -2281,6 +2281,10 @@ class SetProperty(Property): arg_types = {"multi": True} +class SetConfigProperty(Property): + arg_types = {"this": True} + + class SettingsProperty(Property): arg_types = {"expressions": True} diff --git a/sqlglot/generator.py b/sqlglot/generator.py index aa78aef2..75a61c22 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -97,6 +97,7 @@ class Generator: exp.ReturnsProperty: lambda self, e: self.naked_property(e), exp.SampleProperty: lambda self, e: f"SAMPLE BY {self.sql(e, 'this')}", exp.SetProperty: lambda self, e: f"{'MULTI' if e.args.get('multi') else ''}SET", + exp.SetConfigProperty: lambda self, e: self.sql(e, "this"), exp.SettingsProperty: lambda self, e: f"SETTINGS{self.seg('')}{(self.expressions(e))}", exp.SqlReadWriteProperty: lambda self, e: e.name, exp.SqlSecurityProperty: lambda self, e: f"SQL SECURITY {'DEFINER' if e.args.get('definer') else 'INVOKER'}", @@ -355,6 +356,7 @@ class Generator: exp.Set: exp.Properties.Location.POST_SCHEMA, exp.SettingsProperty: exp.Properties.Location.POST_SCHEMA, exp.SetProperty: exp.Properties.Location.POST_CREATE, + exp.SetConfigProperty: exp.Properties.Location.POST_SCHEMA, exp.SortKeyProperty: exp.Properties.Location.POST_SCHEMA, exp.SqlReadWriteProperty: exp.Properties.Location.POST_SCHEMA, exp.SqlSecurityProperty: exp.Properties.Location.POST_CREATE,
tobymao/sqlglot
89b781b991ce264cd7f8c44fa67860eb9a587b07
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py index 7aafa37b..da3a2065 100644 --- a/tests/dialects/test_postgres.py +++ b/tests/dialects/test_postgres.py @@ -597,6 +597,8 @@ class TestPostgres(Validator): self.validate_identity("CREATE TABLE cities_partdef PARTITION OF cities DEFAULT") self.validate_identity("CREATE TABLE t (c CHAR(2) UNIQUE NOT NULL) INHERITS (t1)") self.validate_identity("CREATE TABLE s.t (c CHAR(2) UNIQUE NOT NULL) INHERITS (s.t1, s.t2)") + self.validate_identity("CREATE FUNCTION x(INT) RETURNS INT SET search_path = 'public'") + self.validate_identity("CREATE FUNCTION x(INT) RETURNS INT SET foo FROM CURRENT") self.validate_identity( "CREATE CONSTRAINT TRIGGER my_trigger AFTER INSERT OR DELETE OR UPDATE OF col_a, col_b ON public.my_table DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION do_sth()" ) @@ -642,16 +644,23 @@ class TestPostgres(Validator): self.validate_identity( "DELETE FROM event USING sales AS s WHERE event.eventid = s.eventid RETURNING a" ) - self.validate_identity( - "CREATE TABLE test (x TIMESTAMP WITHOUT TIME ZONE[][])", - "CREATE TABLE test (x TIMESTAMP[][])", - ) self.validate_identity( "CREATE UNLOGGED TABLE foo AS WITH t(c) AS (SELECT 1) SELECT * FROM (SELECT c AS c FROM t) AS temp" ) self.validate_identity( "WITH t(c) AS (SELECT 1) SELECT * INTO UNLOGGED foo FROM (SELECT c AS c FROM t) AS temp" ) + self.validate_identity( + "CREATE FUNCTION add(INT, INT) RETURNS INT SET search_path TO 'public' AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE" + ) + self.validate_identity( + "CREATE FUNCTION x(INT) RETURNS INT SET search_path TO 'public'", + "CREATE FUNCTION x(INT) RETURNS INT SET search_path = 'public'", + ) + self.validate_identity( + "CREATE TABLE test (x TIMESTAMP WITHOUT TIME ZONE[][])", + "CREATE TABLE test (x TIMESTAMP[][])", + ) self.validate_all( "CREATE OR REPLACE FUNCTION function_name (input_a character varying DEFAULT NULL::character varying)",
Failed to parse SET statement in Postgres CREATE FUNCTION **Fully reproducible code snippet** ```py from sqlglot import parse parse( """ CREATE FUNCTION add(integer, integer) RETURNS integer SET search_path TO 'public' AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE; """, dialect="postgres", ) ``` Raises ``` Traceback (most recent call last): File "<...>/create-function-repro.py", line 3, in <module> parse( File "<...>/python3.9/site-packages/sqlglot/__init__.py", line 86, in parse return Dialect.get_or_raise(read or dialect).parse(sql, **opts) File "<...>/python3.9/site-packages/sqlglot/dialects/dialect.py", line 442, in parse return self.parser(**opts).parse(self.tokenize(sql), sql) File "<...>/python3.9/site-packages/sqlglot/parser.py", line 1026, in parse return self._parse( File "<...>/python3.9/site-packages/sqlglot/parser.py", line 1095, in _parse self.raise_error("Invalid expression / Unexpected token") File "<...>/python3.9/site-packages/sqlglot/parser.py", line 1136, in raise_error raise error sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 3, Col: 28. CREATE FUNCTION add(integer, integer) RETURNS integer SET search_path TO 'public' AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE; ``` **Official Documentation** - https://www.postgresql.org/docs/current/sql-createfunction.html Notice the allowed statement `SET configuration_parameter { TO value | = value | FROM CURRENT }` in `CREATE FUNCTION`. Unrelated, but `STRICT/RETURNS NULL ON NULL INPUT` also fails, thus I omitted it from the example above.
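A sketch assuming the SET property parser added by the patch above; per this record's test patch, the reporter's statement now parses and round-trips unchanged:

```python
from sqlglot import parse_one

sql = (
    "CREATE FUNCTION add(INT, INT) RETURNS INT "
    "SET search_path TO 'public' AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE"
)
# Previously this raised a ParseError at the SET clause.
print(parse_one(sql, dialect="postgres").sql(dialect="postgres"))
```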
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_postgres.py::TestPostgres::test_ddl" ]
[ "tests/dialects/test_postgres.py::TestPostgres::test_array_offset", "tests/dialects/test_postgres.py::TestPostgres::test_bool_or", "tests/dialects/test_postgres.py::TestPostgres::test_operator", "tests/dialects/test_postgres.py::TestPostgres::test_postgres", "tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary", "tests/dialects/test_postgres.py::TestPostgres::test_string_concat", "tests/dialects/test_postgres.py::TestPostgres::test_unnest", "tests/dialects/test_postgres.py::TestPostgres::test_variance" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-01-22T15:15:21Z"
mit
tobymao__sqlglot-2935
diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py index 0404c78f..6a6825e4 100644 --- a/sqlglot/dialects/postgres.py +++ b/sqlglot/dialects/postgres.py @@ -232,6 +232,9 @@ class Postgres(Dialect): BYTE_STRINGS = [("e'", "'"), ("E'", "'")] HEREDOC_STRINGS = ["$"] + HEREDOC_TAG_IS_IDENTIFIER = True + HEREDOC_STRING_ALTERNATIVE = TokenType.PARAMETER + KEYWORDS = { **tokens.Tokenizer.KEYWORDS, "~~": TokenType.LIKE, diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py index 87a49240..b0649578 100644 --- a/sqlglot/tokens.py +++ b/sqlglot/tokens.py @@ -504,6 +504,7 @@ class _Tokenizer(type): command_prefix_tokens={ _TOKEN_TYPE_TO_INDEX[v] for v in klass.COMMAND_PREFIX_TOKENS }, + heredoc_tag_is_identifier=klass.HEREDOC_TAG_IS_IDENTIFIER, ) token_types = RsTokenTypeSettings( bit_string=_TOKEN_TYPE_TO_INDEX[TokenType.BIT_STRING], @@ -517,6 +518,7 @@ class _Tokenizer(type): semicolon=_TOKEN_TYPE_TO_INDEX[TokenType.SEMICOLON], string=_TOKEN_TYPE_TO_INDEX[TokenType.STRING], var=_TOKEN_TYPE_TO_INDEX[TokenType.VAR], + heredoc_string_alternative=_TOKEN_TYPE_TO_INDEX[klass.HEREDOC_STRING_ALTERNATIVE], ) klass._RS_TOKENIZER = RsTokenizer(settings, token_types) else: @@ -573,6 +575,12 @@ class Tokenizer(metaclass=_Tokenizer): STRING_ESCAPES = ["'"] VAR_SINGLE_TOKENS: t.Set[str] = set() + # Whether or not the heredoc tags follow the same lexical rules as unquoted identifiers + HEREDOC_TAG_IS_IDENTIFIER = False + + # Token that we'll generate as a fallback if the heredoc prefix doesn't correspond to a heredoc + HEREDOC_STRING_ALTERNATIVE = TokenType.VAR + # Autofilled _COMMENTS: t.Dict[str, str] = {} _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {} @@ -1249,6 +1257,18 @@ class Tokenizer(metaclass=_Tokenizer): elif token_type == TokenType.BIT_STRING: base = 2 elif token_type == TokenType.HEREDOC_STRING: + if ( + self.HEREDOC_TAG_IS_IDENTIFIER + and not self._peek.isidentifier() + and not self._peek == end + ): + if self.HEREDOC_STRING_ALTERNATIVE != token_type.VAR: + self._add(self.HEREDOC_STRING_ALTERNATIVE) + else: + self._scan_var() + + return True + self._advance() tag = "" if self._char == end else self._extract_string(end) end = f"{start}{tag}{end}" diff --git a/sqlglotrs/src/settings.rs b/sqlglotrs/src/settings.rs index 32575c63..c6e76a70 100644 --- a/sqlglotrs/src/settings.rs +++ b/sqlglotrs/src/settings.rs @@ -17,6 +17,7 @@ pub struct TokenTypeSettings { pub semicolon: TokenType, pub string: TokenType, pub var: TokenType, + pub heredoc_string_alternative: TokenType, } #[pymethods] @@ -34,6 +35,7 @@ impl TokenTypeSettings { semicolon: TokenType, string: TokenType, var: TokenType, + heredoc_string_alternative: TokenType, ) -> Self { TokenTypeSettings { bit_string, @@ -47,6 +49,7 @@ impl TokenTypeSettings { semicolon, string, var, + heredoc_string_alternative, } } } @@ -69,6 +72,7 @@ pub struct TokenizerSettings { pub var_single_tokens: HashSet<char>, pub commands: HashSet<TokenType>, pub command_prefix_tokens: HashSet<TokenType>, + pub heredoc_tag_is_identifier: bool, } #[pymethods] @@ -90,6 +94,7 @@ impl TokenizerSettings { var_single_tokens: HashSet<String>, commands: HashSet<TokenType>, command_prefix_tokens: HashSet<TokenType>, + heredoc_tag_is_identifier: bool, ) -> Self { let to_char = |v: &String| { if v.len() == 1 { @@ -138,6 +143,7 @@ impl TokenizerSettings { var_single_tokens: var_single_tokens_native, commands, command_prefix_tokens, + heredoc_tag_is_identifier, } } } diff --git a/sqlglotrs/src/tokenizer.rs b/sqlglotrs/src/tokenizer.rs index 920a5b5c..94a8b084 
100644 --- a/sqlglotrs/src/tokenizer.rs +++ b/sqlglotrs/src/tokenizer.rs @@ -399,6 +399,19 @@ impl<'a> TokenizerState<'a> { } else if *token_type == self.token_types.bit_string { (Some(2), *token_type, end.clone()) } else if *token_type == self.token_types.heredoc_string { + if self.settings.heredoc_tag_is_identifier + && !self.is_identifier(self.peek_char) + && self.peek_char.to_string() != *end + { + if self.token_types.heredoc_string_alternative != self.token_types.var { + self.add(self.token_types.heredoc_string_alternative, None)? + } else { + self.scan_var()? + }; + + return Ok(true) + }; + self.advance(1)?; let tag = if self.current_char.to_string() == *end { String::from("") @@ -469,7 +482,7 @@ impl<'a> TokenizerState<'a> { } else if self.peek_char.to_ascii_uppercase() == 'E' && scientific == 0 { scientific += 1; self.advance(1)?; - } else if self.peek_char.is_alphabetic() || self.peek_char == '_' { + } else if self.is_identifier(self.peek_char) { let number_text = self.text(); let mut literal = String::from(""); @@ -643,6 +656,10 @@ impl<'a> TokenizerState<'a> { Ok(text) } + fn is_identifier(&mut self, name: char) -> bool { + name.is_alphabetic() || name == '_' + } + fn extract_value(&mut self) -> Result<String, TokenizerError> { loop { if !self.peek_char.is_whitespace()
tobymao/sqlglot
b8276262bdca57e358284fadfdd468d2bc957e84
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py index dd73bae3..7351f6a0 100644 --- a/tests/dialects/test_clickhouse.py +++ b/tests/dialects/test_clickhouse.py @@ -77,6 +77,10 @@ class TestClickhouse(Validator): self.validate_identity("""SELECT JSONExtractString('{"x": {"y": 1}}', 'x', 'y')""") self.validate_identity("SELECT * FROM table LIMIT 1 BY a, b") self.validate_identity("SELECT * FROM table LIMIT 2 OFFSET 1 BY a, b") + self.validate_identity( + "SELECT $1$foo$1$", + "SELECT 'foo'", + ) self.validate_identity( "SELECT * FROM table LIMIT 1, 2 BY a, b", "SELECT * FROM table LIMIT 2 OFFSET 1 BY a, b", diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py index 9c4246e5..61421e5f 100644 --- a/tests/dialects/test_postgres.py +++ b/tests/dialects/test_postgres.py @@ -33,6 +33,7 @@ class TestPostgres(Validator): self.assertIsInstance(expr, exp.AlterTable) self.assertEqual(expr.sql(dialect="postgres"), alter_table_only) + self.validate_identity("SELECT x FROM t WHERE CAST($1 AS TEXT) = 'ok'") self.validate_identity("SELECT * FROM t TABLESAMPLE SYSTEM (50) REPEATABLE (55)") self.validate_identity("x @@ y") self.validate_identity("CAST(x AS MONEY)")
Parameter `$1` for `postgres` is not supported In version `v18.8.0`, support for heredoc strings was added, and `"$": TokenType.PARAMETER` was replaced with `"$": TokenType.HEREDOC_STRING`. Since then, a SQL query with a parameter like `$1` cannot be parsed. Here is the original PR: [Feat!: add support for heredoc strings (Postgres, ClickHouse) #2328](https://github.com/tobymao/sqlglot/pull/2328) **Fully reproducible code snippet** ``` import sqlglot from sqlglot.optimizer.annotate_types import annotate_types from sqlglot.optimizer.qualify import qualify schema = {"t": {"x": "text"}} sql = "select x from t where $1::text = 'ok'" expression = sqlglot.parse_one(sql, dialect="postgres") print(expression.sql(dialect="postgres")) qualified_expr = qualify(expression, schema=schema, dialect="postgres") annotated_expr = annotate_types(qualified_expr, schema=schema) print(annotated_expr.selects[0].type) ``` An exception is raised; the expected output is `TEXT`. ``` Traceback (most recent call last): File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/tokens.py", line 836, in tokenize self._scan() File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/tokens.py", line 859, in _scan self._scan_keywords() File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/tokens.py", line 994, in _scan_keywords if self._scan_string(word): File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/tokens.py", line 1136, in _scan_string tag = "" if self._char == end else self._extract_string(end) File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/tokens.py", line 1203, in _extract_string raise TokenError(f"Missing {delimiter} from {self._line}:{self._start}") sqlglot.errors.TokenError: Missing $ from 1:22 The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/eric/tmp/test.py", line 20, in <module> expression = sqlglot.parse_one(sql, dialect="postgres") File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/__init__.py", line 125, in parse_one result = dialect.parse(sql, **opts) File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/dialects/dialect.py", line 304, in parse return self.parser(**opts).parse(self.tokenize(sql), sql) File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/dialects/dialect.py", line 318, in tokenize return self.tokenizer.tokenize(sql) File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/tokens.py", line 841, in tokenize raise TokenError(f"Error tokenizing '{context}'") from e sqlglot.errors.TokenError: Error tokenizing 'select x from t where $1::text = 'ok' ```
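A sketch assuming the heredoc-fallback change in the patch above, under which `$1` tokenizes as a parameter again; the round-trip is the one asserted in this record's test patch:

```python
import sqlglot

expr = sqlglot.parse_one("SELECT x FROM t WHERE CAST($1 AS TEXT) = 'ok'", dialect="postgres")
print(expr.sql(dialect="postgres"))  # SELECT x FROM t WHERE CAST($1 AS TEXT) = 'ok'
```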
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_postgres.py::TestPostgres::test_postgres" ]
[ "tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse", "tests/dialects/test_clickhouse.py::TestClickhouse::test_cte", "tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl", "tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization", "tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types", "tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary", "tests/dialects/test_postgres.py::TestPostgres::test_array_offset", "tests/dialects/test_postgres.py::TestPostgres::test_bool_or", "tests/dialects/test_postgres.py::TestPostgres::test_ddl", "tests/dialects/test_postgres.py::TestPostgres::test_operator", "tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary", "tests/dialects/test_postgres.py::TestPostgres::test_string_concat", "tests/dialects/test_postgres.py::TestPostgres::test_unnest", "tests/dialects/test_postgres.py::TestPostgres::test_variance" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-02-08T19:03:10Z"
mit
tobymao__sqlglot-2936
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py index d7ba729c..e61ac4fd 100644 --- a/sqlglot/dialects/duckdb.py +++ b/sqlglot/dialects/duckdb.py @@ -333,6 +333,7 @@ class DuckDB(Dialect): IGNORE_NULLS_IN_FUNC = True JSON_PATH_BRACKETED_KEY_SUPPORTED = False SUPPORTS_CREATE_TABLE_LIKE = False + MULTI_ARG_DISTINCT = False TRANSFORMS = { **generator.Generator.TRANSFORMS, diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py index 0404c78f..68e2c6de 100644 --- a/sqlglot/dialects/postgres.py +++ b/sqlglot/dialects/postgres.py @@ -232,6 +232,9 @@ class Postgres(Dialect): BYTE_STRINGS = [("e'", "'"), ("E'", "'")] HEREDOC_STRINGS = ["$"] + HEREDOC_TAG_IS_IDENTIFIER = True + HEREDOC_STRING_ALTERNATIVE = TokenType.PARAMETER + KEYWORDS = { **tokens.Tokenizer.KEYWORDS, "~~": TokenType.LIKE, @@ -381,6 +384,7 @@ class Postgres(Dialect): JSON_TYPE_REQUIRED_FOR_EXTRACTION = True SUPPORTS_UNLOGGED_TABLES = True LIKE_PROPERTY_INSIDE_SCHEMA = True + MULTI_ARG_DISTINCT = False SUPPORTED_JSON_PATH_PARTS = { exp.JSONPathKey, diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py index 8691192b..609103e5 100644 --- a/sqlglot/dialects/presto.py +++ b/sqlglot/dialects/presto.py @@ -292,6 +292,7 @@ class Presto(Dialect): LIMIT_ONLY_LITERALS = True SUPPORTS_SINGLE_ARG_CONCAT = False LIKE_PROPERTY_INSIDE_SCHEMA = True + MULTI_ARG_DISTINCT = False PROPERTIES_LOCATION = { **generator.Generator.PROPERTIES_LOCATION, diff --git a/sqlglot/generator.py b/sqlglot/generator.py index 81af56d8..eff8aaa2 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -296,6 +296,10 @@ class Generator(metaclass=_Generator): # Whether or not the LikeProperty needs to be specified inside of the schema clause LIKE_PROPERTY_INSIDE_SCHEMA = False + # Whether or not DISTINCT can be followed by multiple args in an AggFunc. 
If not, it will be + # transpiled into a series of CASE-WHEN-ELSE, ultimately using a tuple conseisting of the args + MULTI_ARG_DISTINCT = True + # Whether or not the JSON extraction operators expect a value of type JSON JSON_TYPE_REQUIRED_FOR_EXTRACTION = False @@ -2837,6 +2841,13 @@ class Generator(metaclass=_Generator): def distinct_sql(self, expression: exp.Distinct) -> str: this = self.expressions(expression, flat=True) + + if not self.MULTI_ARG_DISTINCT and len(expression.expressions) > 1: + case = exp.case() + for arg in expression.expressions: + case = case.when(arg.is_(exp.null()), exp.null()) + this = self.sql(case.else_(f"({this})")) + this = f" {this}" if this else "" on = self.sql(expression, "on") diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py index 87a49240..b0649578 100644 --- a/sqlglot/tokens.py +++ b/sqlglot/tokens.py @@ -504,6 +504,7 @@ class _Tokenizer(type): command_prefix_tokens={ _TOKEN_TYPE_TO_INDEX[v] for v in klass.COMMAND_PREFIX_TOKENS }, + heredoc_tag_is_identifier=klass.HEREDOC_TAG_IS_IDENTIFIER, ) token_types = RsTokenTypeSettings( bit_string=_TOKEN_TYPE_TO_INDEX[TokenType.BIT_STRING], @@ -517,6 +518,7 @@ class _Tokenizer(type): semicolon=_TOKEN_TYPE_TO_INDEX[TokenType.SEMICOLON], string=_TOKEN_TYPE_TO_INDEX[TokenType.STRING], var=_TOKEN_TYPE_TO_INDEX[TokenType.VAR], + heredoc_string_alternative=_TOKEN_TYPE_TO_INDEX[klass.HEREDOC_STRING_ALTERNATIVE], ) klass._RS_TOKENIZER = RsTokenizer(settings, token_types) else: @@ -573,6 +575,12 @@ class Tokenizer(metaclass=_Tokenizer): STRING_ESCAPES = ["'"] VAR_SINGLE_TOKENS: t.Set[str] = set() + # Whether or not the heredoc tags follow the same lexical rules as unquoted identifiers + HEREDOC_TAG_IS_IDENTIFIER = False + + # Token that we'll generate as a fallback if the heredoc prefix doesn't correspond to a heredoc + HEREDOC_STRING_ALTERNATIVE = TokenType.VAR + # Autofilled _COMMENTS: t.Dict[str, str] = {} _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {} @@ -1249,6 +1257,18 @@ class Tokenizer(metaclass=_Tokenizer): elif token_type == TokenType.BIT_STRING: base = 2 elif token_type == TokenType.HEREDOC_STRING: + if ( + self.HEREDOC_TAG_IS_IDENTIFIER + and not self._peek.isidentifier() + and not self._peek == end + ): + if self.HEREDOC_STRING_ALTERNATIVE != token_type.VAR: + self._add(self.HEREDOC_STRING_ALTERNATIVE) + else: + self._scan_var() + + return True + self._advance() tag = "" if self._char == end else self._extract_string(end) end = f"{start}{tag}{end}" diff --git a/sqlglotrs/src/settings.rs b/sqlglotrs/src/settings.rs index 32575c63..c6e76a70 100644 --- a/sqlglotrs/src/settings.rs +++ b/sqlglotrs/src/settings.rs @@ -17,6 +17,7 @@ pub struct TokenTypeSettings { pub semicolon: TokenType, pub string: TokenType, pub var: TokenType, + pub heredoc_string_alternative: TokenType, } #[pymethods] @@ -34,6 +35,7 @@ impl TokenTypeSettings { semicolon: TokenType, string: TokenType, var: TokenType, + heredoc_string_alternative: TokenType, ) -> Self { TokenTypeSettings { bit_string, @@ -47,6 +49,7 @@ impl TokenTypeSettings { semicolon, string, var, + heredoc_string_alternative, } } } @@ -69,6 +72,7 @@ pub struct TokenizerSettings { pub var_single_tokens: HashSet<char>, pub commands: HashSet<TokenType>, pub command_prefix_tokens: HashSet<TokenType>, + pub heredoc_tag_is_identifier: bool, } #[pymethods] @@ -90,6 +94,7 @@ impl TokenizerSettings { var_single_tokens: HashSet<String>, commands: HashSet<TokenType>, command_prefix_tokens: HashSet<TokenType>, + heredoc_tag_is_identifier: bool, ) -> Self { let to_char = |v: 
&String| { if v.len() == 1 { @@ -138,6 +143,7 @@ impl TokenizerSettings { var_single_tokens: var_single_tokens_native, commands, command_prefix_tokens, + heredoc_tag_is_identifier, } } } diff --git a/sqlglotrs/src/tokenizer.rs b/sqlglotrs/src/tokenizer.rs index 920a5b5c..94a8b084 100644 --- a/sqlglotrs/src/tokenizer.rs +++ b/sqlglotrs/src/tokenizer.rs @@ -399,6 +399,19 @@ impl<'a> TokenizerState<'a> { } else if *token_type == self.token_types.bit_string { (Some(2), *token_type, end.clone()) } else if *token_type == self.token_types.heredoc_string { + if self.settings.heredoc_tag_is_identifier + && !self.is_identifier(self.peek_char) + && self.peek_char.to_string() != *end + { + if self.token_types.heredoc_string_alternative != self.token_types.var { + self.add(self.token_types.heredoc_string_alternative, None)? + } else { + self.scan_var()? + }; + + return Ok(true) + }; + self.advance(1)?; let tag = if self.current_char.to_string() == *end { String::from("") @@ -469,7 +482,7 @@ impl<'a> TokenizerState<'a> { } else if self.peek_char.to_ascii_uppercase() == 'E' && scientific == 0 { scientific += 1; self.advance(1)?; - } else if self.peek_char.is_alphabetic() || self.peek_char == '_' { + } else if self.is_identifier(self.peek_char) { let number_text = self.text(); let mut literal = String::from(""); @@ -643,6 +656,10 @@ impl<'a> TokenizerState<'a> { Ok(text) } + fn is_identifier(&mut self, name: char) -> bool { + name.is_alphabetic() || name == '_' + } + fn extract_value(&mut self) -> Result<String, TokenizerError> { loop { if !self.peek_char.is_whitespace()
tobymao/sqlglot
b8276262bdca57e358284fadfdd468d2bc957e84
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py index dd73bae3..7351f6a0 100644 --- a/tests/dialects/test_clickhouse.py +++ b/tests/dialects/test_clickhouse.py @@ -77,6 +77,10 @@ class TestClickhouse(Validator): self.validate_identity("""SELECT JSONExtractString('{"x": {"y": 1}}', 'x', 'y')""") self.validate_identity("SELECT * FROM table LIMIT 1 BY a, b") self.validate_identity("SELECT * FROM table LIMIT 2 OFFSET 1 BY a, b") + self.validate_identity( + "SELECT $1$foo$1$", + "SELECT 'foo'", + ) self.validate_identity( "SELECT * FROM table LIMIT 1, 2 BY a, b", "SELECT * FROM table LIMIT 2 OFFSET 1 BY a, b", diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py index 9c4246e5..61421e5f 100644 --- a/tests/dialects/test_postgres.py +++ b/tests/dialects/test_postgres.py @@ -33,6 +33,7 @@ class TestPostgres(Validator): self.assertIsInstance(expr, exp.AlterTable) self.assertEqual(expr.sql(dialect="postgres"), alter_table_only) + self.validate_identity("SELECT x FROM t WHERE CAST($1 AS TEXT) = 'ok'") self.validate_identity("SELECT * FROM t TABLESAMPLE SYSTEM (50) REPEATABLE (55)") self.validate_identity("x @@ y") self.validate_identity("CAST(x AS MONEY)") diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py index a02a735c..75bb91af 100644 --- a/tests/dialects/test_spark.py +++ b/tests/dialects/test_spark.py @@ -277,6 +277,21 @@ TBLPROPERTIES ( "SELECT STR_TO_MAP('a:1,b:2,c:3', ',', ':')", ) + self.validate_all( + "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl", + write={ + "clickhouse": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl", + "databricks": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl", + "doris": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl", + "duckdb": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT CASE WHEN id IS NULL THEN NULL WHEN name IS NULL THEN NULL ELSE (id, name) END) AS cnt FROM tbl", + "hive": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl", + "mysql": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl", + "postgres": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT CASE WHEN id IS NULL THEN NULL WHEN name IS NULL THEN NULL ELSE (id, name) END) AS cnt FROM tbl", + "presto": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT CASE WHEN id IS NULL THEN NULL WHEN name IS NULL THEN NULL ELSE (id, name) END) AS cnt FROM tbl", + "snowflake": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl", + "spark": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl", + }, + ) self.validate_all( "SELECT TO_UTC_TIMESTAMP('2016-08-31', 'Asia/Seoul')", write={
Count distinct syntax error while transpiling to Trino SQL **Fully reproducible code snippet** ```python import sqlglot sql = """ with tbl as ( select 1 as id, 'eggy' as name union all select null as id, 'jake' as name ) select count(distinct id, name) as cnt from tbl """ sqlglot.transpile(sql, read="hive", write="trino")[0] ``` **Details** The result of the code snippet is: ``` "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl" ``` Running this SQL in the Trino CLI raises the error shown below; as far as I know, Trino/Presto does not support the syntax `count(distinct col_1, col_2, ...)`, only wrapping multiple columns into a **row** type, as in `count(distinct (col_1, col_2, ...))`. Screenshot of the Trino error: https://github.com/tobymao/sqlglot/assets/60967034/82f76a45-1519-401a-9133-b749317ba0d5
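A sketch assuming `MULTI_ARG_DISTINCT = False` for Presto/Trino as in the patch above; the expected rewrite (a NULL-propagating CASE over a row tuple) comes from this record's test patch:

```python
import sqlglot

print(sqlglot.transpile(
    "SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl", read="hive", write="trino"
)[0])
# SELECT COUNT(DISTINCT CASE WHEN id IS NULL THEN NULL WHEN name IS NULL THEN NULL ELSE (id, name) END) AS cnt FROM tbl
```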
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_postgres.py::TestPostgres::test_postgres", "tests/dialects/test_spark.py::TestSpark::test_spark" ]
[ "tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse", "tests/dialects/test_clickhouse.py::TestClickhouse::test_cte", "tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl", "tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization", "tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types", "tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary", "tests/dialects/test_postgres.py::TestPostgres::test_array_offset", "tests/dialects/test_postgres.py::TestPostgres::test_bool_or", "tests/dialects/test_postgres.py::TestPostgres::test_ddl", "tests/dialects/test_postgres.py::TestPostgres::test_operator", "tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary", "tests/dialects/test_postgres.py::TestPostgres::test_string_concat", "tests/dialects/test_postgres.py::TestPostgres::test_unnest", "tests/dialects/test_postgres.py::TestPostgres::test_variance", "tests/dialects/test_spark.py::TestSpark::test_bool_or", "tests/dialects/test_spark.py::TestSpark::test_current_user", "tests/dialects/test_spark.py::TestSpark::test_ddl", "tests/dialects/test_spark.py::TestSpark::test_explode_to_unnest", "tests/dialects/test_spark.py::TestSpark::test_hint", "tests/dialects/test_spark.py::TestSpark::test_iif", "tests/dialects/test_spark.py::TestSpark::test_insert_cte", "tests/dialects/test_spark.py::TestSpark::test_to_date", "tests/dialects/test_spark.py::TestSpark::test_transform_query" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-02-08T20:50:27Z"
mit
tobymao__sqlglot-2938
diff --git a/sqlglot/generator.py b/sqlglot/generator.py index eff8aaa2..e22c2975 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -2861,12 +2861,20 @@ class Generator(metaclass=_Generator): return self._embed_ignore_nulls(expression, "RESPECT NULLS") def _embed_ignore_nulls(self, expression: exp.IgnoreNulls | exp.RespectNulls, text: str) -> str: - if self.IGNORE_NULLS_IN_FUNC: - this = expression.find(exp.AggFunc) - if this: - sql = self.sql(this) - sql = sql[:-1] + f" {text})" - return sql + if self.IGNORE_NULLS_IN_FUNC and not expression.meta.get("inline"): + for klass in (exp.Order, exp.Limit): + mod = expression.find(klass) + + if mod: + this = expression.__class__(this=mod.this.copy()) + this.meta["inline"] = True + mod.this.replace(this) + return self.sql(expression.this) + + agg_func = expression.find(exp.AggFunc) + + if agg_func: + return self.sql(agg_func)[:-1] + f" {text})" return f"{self.sql(expression, 'this')} {text}"
tobymao/sqlglot
31e1908d33a7fa01727159a4ab38b7cc9962fcbd
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py index 340630c2..41b96980 100644 --- a/tests/dialects/test_bigquery.py +++ b/tests/dialects/test_bigquery.py @@ -18,6 +18,11 @@ class TestBigQuery(Validator): maxDiff = None def test_bigquery(self): + self.validate_identity("ARRAY_AGG(x IGNORE NULLS LIMIT 1)") + self.validate_identity("ARRAY_AGG(x IGNORE NULLS ORDER BY x LIMIT 1)") + self.validate_identity("ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY x LIMIT 1)") + self.validate_identity("ARRAY_AGG(x IGNORE NULLS)") + self.validate_all( "SELECT SUM(x IGNORE NULLS) AS x", read={ @@ -55,6 +60,7 @@ class TestBigQuery(Validator): self.validate_all( "SELECT PERCENTILE_CONT(x, 0.5 RESPECT NULLS) OVER ()", write={ + "bigquery": "SELECT PERCENTILE_CONT(x, 0.5 RESPECT NULLS) OVER ()", "duckdb": "SELECT QUANTILE_CONT(x, 0.5 RESPECT NULLS) OVER ()", "spark": "SELECT PERCENTILE_CONT(x, 0.5) RESPECT NULLS OVER ()", }, @@ -62,14 +68,16 @@ class TestBigQuery(Validator): self.validate_all( "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 10) AS x", write={ - "duckdb": "SELECT ARRAY_AGG(DISTINCT x ORDER BY a NULLS FIRST, b DESC LIMIT 10 IGNORE NULLS) AS x", + "bigquery": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 10) AS x", + "duckdb": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a NULLS FIRST, b DESC LIMIT 10) AS x", "spark": "SELECT COLLECT_LIST(DISTINCT x ORDER BY a, b DESC LIMIT 10) IGNORE NULLS AS x", }, ) self.validate_all( "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 1, 10) AS x", write={ - "duckdb": "SELECT ARRAY_AGG(DISTINCT x ORDER BY a NULLS FIRST, b DESC LIMIT 1, 10 IGNORE NULLS) AS x", + "bigquery": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 1, 10) AS x", + "duckdb": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a NULLS FIRST, b DESC LIMIT 1, 10) AS x", "spark": "SELECT COLLECT_LIST(DISTINCT x ORDER BY a, b DESC LIMIT 1, 10) IGNORE NULLS AS x", }, )
Order of `IGNORE NULLS` in BigQuery generated SQL is incorrect `IGNORE NULLS` needs to occur before `LIMIT`, otherwise it's an error. **Fully reproducible code snippet** ``` In [7]: import sqlglot as sg In [8]: sg.__version__ Out[8]: '21.0.1' In [9]: sg.parse_one('select array_agg(x ignore nulls limit 1)', read='bigquery').sql('bigquery') Out[9]: 'SELECT array_agg(x LIMIT 1 IGNORE NULLS)' ``` **Official Documentation** https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate-function-calls
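A sketch assuming the generator fix above, which moves IGNORE NULLS before ORDER BY/LIMIT inside the aggregate call; the expected output matches this record's test patch:

```python
import sqlglot as sg

print(sg.parse_one("select array_agg(x ignore nulls limit 1)", read="bigquery").sql("bigquery"))
# SELECT ARRAY_AGG(x IGNORE NULLS LIMIT 1)
```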
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery" ]
[ "tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat", "tests/dialects/test_bigquery.py::TestBigQuery::test_json_object", "tests/dialects/test_bigquery.py::TestBigQuery::test_merge", "tests/dialects/test_bigquery.py::TestBigQuery::test_models", "tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names", "tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types", "tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table", "tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
"2024-02-08T23:35:03Z"
mit
tobymao__sqlglot-2956
diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py index a64c1d40..2ad9ac34 100644 --- a/sqlglot/dialects/redshift.py +++ b/sqlglot/dialects/redshift.py @@ -136,11 +136,11 @@ class Redshift(Postgres): refs.add( ( this.args["from"] if i == 0 else this.args["joins"][i - 1] - ).alias_or_name.lower() + ).this.alias.lower() ) - table = join.this - if isinstance(table, exp.Table): + table = join.this + if isinstance(table, exp.Table) and not join.args.get("on"): if table.parts[0].name.lower() in refs: table.replace(table.to_column()) return this
tobymao/sqlglot
78e6d0de83efbff1d3b61c8550db56c1819f7c22
diff --git a/tests/dialects/test_redshift.py b/tests/dialects/test_redshift.py index b6b6ccc3..6925a64b 100644 --- a/tests/dialects/test_redshift.py +++ b/tests/dialects/test_redshift.py @@ -1,4 +1,4 @@ -from sqlglot import transpile +from sqlglot import exp, parse_one, transpile from tests.dialects.test_dialect import Validator @@ -381,8 +381,6 @@ class TestRedshift(Validator): "SELECT DATEADD(DAY, 1, DATE('2023-01-01'))", ) - self.validate_identity("SELECT * FROM x AS a, a.b AS c, c.d.e AS f, f.g.h.i.j.k AS l") - self.validate_identity( """SELECT c_name, @@ -532,3 +530,26 @@ FROM ( "redshift": "CREATE OR REPLACE VIEW v1 AS SELECT cola, colb FROM t1 WITH NO SCHEMA BINDING", }, ) + + def test_column_unnesting(self): + ast = parse_one("SELECT * FROM t.t JOIN t.c1 ON c1.c2 = t.c3", read="redshift") + ast.args["from"].this.assert_is(exp.Table) + ast.args["joins"][0].this.assert_is(exp.Table) + self.assertEqual(ast.sql("redshift"), "SELECT * FROM t.t JOIN t.c1 ON c1.c2 = t.c3") + + ast = parse_one("SELECT * FROM t AS t CROSS JOIN t.c1", read="redshift") + ast.args["from"].this.assert_is(exp.Table) + ast.args["joins"][0].this.assert_is(exp.Column) + self.assertEqual(ast.sql("redshift"), "SELECT * FROM t AS t CROSS JOIN t.c1") + + ast = parse_one( + "SELECT * FROM x AS a, a.b AS c, c.d.e AS f, f.g.h.i.j.k AS l", read="redshift" + ) + joins = ast.args["joins"] + ast.args["from"].this.assert_is(exp.Table) + joins[0].this.this.assert_is(exp.Column) + joins[1].this.this.assert_is(exp.Column) + joins[2].this.this.assert_is(exp.Dot) + self.assertEqual( + ast.sql("redshift"), "SELECT * FROM x AS a, a.b AS c, c.d.e AS f, f.g.h.i.j.k AS l" + )
Table incorrectly parsed as column # Description A table of the form `<schema>.<table>` in a join gets mis-parsed as a column. In the attached example, the table `"usage"."company_names"` is mis-parsed as a column. ### Version details - Dialect is 'redshift' - The issue exists on the latest pip version of sqlglot (21.0.2) - The issue doesn't exist on the earlier pip version of sqlglot (20.4.0) ### Correct output with sqlglot==20.4.0 ``` Python 3.11.7 (main, Dec 4 2023, 18:10:11) [Clang 15.0.0 (clang-1500.1.0.2.5)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import sqlglot >>> from sqlglot import parse_one >>> sqlglot.__version__ '20.4.0' >>> dialect = 'redshift' >>> query = 'SELECT * FROM "usage"."usage" JOIN "usage"."company_names" ON "company_names"."id" = "usage"."customerid"' >>> parse_one(query, dialect=dialect) (SELECT expressions: (STAR ), from: (FROM this: (TABLE this: (IDENTIFIER this: usage, quoted: True), db: (IDENTIFIER this: usage, quoted: True))), joins: (JOIN this: (TABLE this: (IDENTIFIER this: company_names, quoted: True), db: (IDENTIFIER this: usage, quoted: True)), on: (EQ this: (COLUMN this: (IDENTIFIER this: id, quoted: True), table: (IDENTIFIER this: company_names, quoted: True)), expression: (COLUMN this: (IDENTIFIER this: customerid, quoted: True), table: (IDENTIFIER this: usage, quoted: True))))) ``` ### Incorrect output with sqlglot==21.0.2 ``` Python 3.11.7 (main, Dec 4 2023, 18:10:11) [Clang 15.0.0 (clang-1500.1.0.2.5)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import sqlglot >>> from sqlglot import parse_one >>> sqlglot.__version__ '21.0.2' >>> dialect = 'redshift' >>> query = 'SELECT * FROM "usage"."usage" JOIN "usage"."company_names" ON "company_names"."id" = "usage"."customerid"' >>> parse_one(query, dialect=dialect) Select( expressions=[ Star()], from=From( this=Table( this=Identifier(this=usage, quoted=True), db=Identifier(this=usage, quoted=True))), joins=[ Join( this=Column( this=Identifier(this=company_names, quoted=True), table=Identifier(this=usage, quoted=True)), on=EQ( this=Column( this=Identifier(this=id, quoted=True), table=Identifier(this=company_names, quoted=True)), expression=Column( this=Identifier(this=customerid, quoted=True), table=Identifier(this=usage, quoted=True))))]) ```
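A sketch assuming the Redshift join fix above, mirroring the assertions in this record's test patch: a schema-qualified table joined with an ON clause stays an exp.Table instead of being rewritten into a column:

```python
from sqlglot import exp, parse_one

ast = parse_one(
    'SELECT * FROM "usage"."usage" JOIN "usage"."company_names" '
    'ON "company_names"."id" = "usage"."customerid"',
    read="redshift",
)
ast.args["joins"][0].this.assert_is(exp.Table)  # raises if mis-parsed as a Column
```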
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_redshift.py::TestRedshift::test_column_unnesting" ]
[ "tests/dialects/test_redshift.py::TestRedshift::test_create_table_like", "tests/dialects/test_redshift.py::TestRedshift::test_identity", "tests/dialects/test_redshift.py::TestRedshift::test_no_schema_binding", "tests/dialects/test_redshift.py::TestRedshift::test_redshift", "tests/dialects/test_redshift.py::TestRedshift::test_rename_table", "tests/dialects/test_redshift.py::TestRedshift::test_values", "tests/dialects/test_redshift.py::TestRedshift::test_varchar_max" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2024-02-12T17:23:40Z"
mit
tobymao__sqlglot-3011
diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py index 3c8bb5f7..d4344f69 100644 --- a/sqlglot/dialects/bigquery.py +++ b/sqlglot/dialects/bigquery.py @@ -302,6 +302,7 @@ class BigQuery(Dialect): "BYTES": TokenType.BINARY, "CURRENT_DATETIME": TokenType.CURRENT_DATETIME, "DECLARE": TokenType.COMMAND, + "ELSEIF": TokenType.COMMAND, "EXCEPTION": TokenType.COMMAND, "FLOAT64": TokenType.DOUBLE, "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT, @@ -410,6 +411,7 @@ class BigQuery(Dialect): STATEMENT_PARSERS = { **parser.Parser.STATEMENT_PARSERS, + TokenType.ELSE: lambda self: self._parse_as_command(self._prev), TokenType.END: lambda self: self._parse_as_command(self._prev), TokenType.FOR: lambda self: self._parse_for_in(), }
tobymao/sqlglot
9079ead97701b32bde0b2d704bbf8f9b67f5a740
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py index 37846da3..cf8cb3b6 100644 --- a/tests/dialects/test_bigquery.py +++ b/tests/dialects/test_bigquery.py @@ -21,6 +21,7 @@ class TestBigQuery(Validator): self.validate_identity("SELECT * FROM x.*") self.validate_identity("SELECT * FROM x.y*") + self.validate_identity("CASE A WHEN 90 THEN 'red' WHEN 50 THEN 'blue' ELSE 'green' END") self.validate_identity("CREATE SCHEMA x DEFAULT COLLATE 'en'") self.validate_identity("CREATE TABLE x (y INT64) DEFAULT COLLATE 'en'") self.validate_identity("PARSE_JSON('{}', wide_number_mode => 'exact')") @@ -1091,6 +1092,35 @@ WHERE self.assertIn("unsupported syntax", cm.output[0]) + with self.assertLogs(helper_logger): + statements = parse( + """ + BEGIN + DECLARE MY_VAR INT64 DEFAULT 1; + SET MY_VAR = (SELECT 0); + + IF MY_VAR = 1 THEN SELECT 'TRUE'; + ELSEIF MY_VAR = 0 THEN SELECT 'FALSE'; + ELSE SELECT 'NULL'; + END IF; + END + """, + read="bigquery", + ) + + expected_statements = ( + "BEGIN DECLARE MY_VAR INT64 DEFAULT 1", + "SET MY_VAR = (SELECT 0)", + "IF MY_VAR = 1 THEN SELECT 'TRUE'", + "ELSEIF MY_VAR = 0 THEN SELECT 'FALSE'", + "ELSE SELECT 'NULL'", + "END IF", + "END", + ) + + for actual, expected in zip(statements, expected_statements): + self.assertEqual(actual.sql(dialect="bigquery"), expected) + with self.assertLogs(helper_logger) as cm: self.validate_identity( "SELECT * FROM t AS t(c1, c2)",
BigQuery parse: unexpected token (`=`) found in ELSEIF clause Code: ```python import sqlglot my_str = """BEGIN DECLARE MY_VAR INT64 DEFAULT 1; SET MY_VAR = (SELECT 0); IF MY_VAR = 1 THEN SELECT 'TRUE'; ELSEIF MY_VAR = 0 THEN SELECT 'FALSE'; ELSE SELECT 'NULL'; END IF; END """ parsed_objects = sqlglot.parse(my_str, dialect='bigquery') print(parsed_objects) ``` error: ``` sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 9, Col: 15. VAR INT64 DEFAULT 1; SET MY_VAR = (SELECT 0); IF MY_VAR = 1 THEN SELECT 'TRUE'; ELSEIF MY_VAR = 0 THEN SELECT 'FALSE'; ELSE SELECT 'NULL'; END IF; END ``` The `=` in the ELSEIF condition is underlined in the error output.
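A sketch assuming ELSEIF/ELSE are tokenized as commands per the patch above; the script and the per-branch statements it should split into are taken from this record's test patch:

```python
import sqlglot

sql = """
BEGIN
  DECLARE MY_VAR INT64 DEFAULT 1;
  SET MY_VAR = (SELECT 0);

  IF MY_VAR = 1 THEN SELECT 'TRUE';
  ELSEIF MY_VAR = 0 THEN SELECT 'FALSE';
  ELSE SELECT 'NULL';
  END IF;
END
"""
# Instead of raising, this now yields one statement per control-flow branch,
# e.g. "ELSEIF MY_VAR = 0 THEN SELECT 'FALSE'" and "END IF".
for stmt in sqlglot.parse(sql, dialect="bigquery"):
    print(stmt.sql(dialect="bigquery"))
```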
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_bigquery.py::TestBigQuery::test_warnings" ]
[ "tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery", "tests/dialects/test_bigquery.py::TestBigQuery::test_errors", "tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat", "tests/dialects/test_bigquery.py::TestBigQuery::test_json_object", "tests/dialects/test_bigquery.py::TestBigQuery::test_merge", "tests/dialects/test_bigquery.py::TestBigQuery::test_models", "tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names", "tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types", "tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table", "tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2024-02-22T14:44:19Z"
mit
tobymao__sqlglot-3027
diff --git a/sqlglot/dialects/mysql.py b/sqlglot/dialects/mysql.py index 13f8b858..7a160c1d 100644 --- a/sqlglot/dialects/mysql.py +++ b/sqlglot/dialects/mysql.py @@ -391,6 +391,11 @@ class MySQL(Dialect): "WARNINGS": _show_parser("WARNINGS"), } + PROPERTY_PARSERS = { + **parser.Parser.PROPERTY_PARSERS, + "LOCK": lambda self: self._parse_property_assignment(exp.LockProperty), + } + SET_PARSERS = { **parser.Parser.SET_PARSERS, "PERSIST": lambda self: self._parse_set_item_assignment("PERSIST"), diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index 4fb3679a..3bec25ee 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -2322,6 +2322,10 @@ class LocationProperty(Property): arg_types = {"this": True} +class LockProperty(Property): + arg_types = {"this": True} + + class LockingProperty(Property): arg_types = { "this": False, @@ -2505,6 +2509,7 @@ class Properties(Expression): "FORMAT": FileFormatProperty, "LANGUAGE": LanguageProperty, "LOCATION": LocationProperty, + "LOCK": LockProperty, "PARTITIONED_BY": PartitionedByProperty, "RETURNS": ReturnsProperty, "ROW_FORMAT": RowFormatProperty, @@ -3923,7 +3928,13 @@ class Rollback(Expression): class AlterTable(Expression): - arg_types = {"this": True, "actions": True, "exists": False, "only": False} + arg_types = { + "this": True, + "actions": True, + "exists": False, + "only": False, + "options": False, + } class AddConstraint(Expression): diff --git a/sqlglot/generator.py b/sqlglot/generator.py index d8b6290d..66466734 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -388,6 +388,7 @@ class Generator(metaclass=_Generator): exp.LanguageProperty: exp.Properties.Location.POST_SCHEMA, exp.LikeProperty: exp.Properties.Location.POST_SCHEMA, exp.LocationProperty: exp.Properties.Location.POST_SCHEMA, + exp.LockProperty: exp.Properties.Location.POST_SCHEMA, exp.LockingProperty: exp.Properties.Location.POST_ALIAS, exp.LogProperty: exp.Properties.Location.POST_NAME, exp.MaterializedProperty: exp.Properties.Location.POST_CREATE, @@ -2833,7 +2834,9 @@ class Generator(metaclass=_Generator): exists = " IF EXISTS" if expression.args.get("exists") else "" only = " ONLY" if expression.args.get("only") else "" - return f"ALTER TABLE{exists}{only} {self.sql(expression, 'this')} {actions}" + options = self.expressions(expression, key="options") + options = f", {options}" if options else "" + return f"ALTER TABLE{exists}{only} {self.sql(expression, 'this')} {actions}{options}" def add_column_sql(self, expression: exp.AlterTable) -> str: if self.ALTER_TABLE_INCLUDE_COLUMN_KEYWORD: diff --git a/sqlglot/parser.py b/sqlglot/parser.py index 322fa1ca..3465c56d 100644 --- a/sqlglot/parser.py +++ b/sqlglot/parser.py @@ -5281,6 +5281,9 @@ class Parser(metaclass=_Parser): def _parse_var_or_string(self) -> t.Optional[exp.Expression]: return self._parse_var() or self._parse_string() + def _parse_primary_or_var(self) -> t.Optional[exp.Expression]: + return self._parse_primary() or self._parse_var(any_token=True) + def _parse_null(self) -> t.Optional[exp.Expression]: if self._match_set(self.NULL_TOKENS): return self.PRIMARY_PARSERS[TokenType.NULL](self, self._prev) @@ -5299,16 +5302,12 @@ class Parser(metaclass=_Parser): return self._parse_placeholder() def _parse_parameter(self) -> exp.Parameter: - def _parse_parameter_part() -> t.Optional[exp.Expression]: - return ( - self._parse_identifier() or self._parse_primary() or self._parse_var(any_token=True) - ) - self._match(TokenType.L_BRACE) - this = _parse_parameter_part() - expression = self._match(TokenType.COLON) and _parse_parameter_part() + this = self._parse_identifier() or self._parse_primary_or_var() + expression = self._match(TokenType.COLON) and ( + self._parse_identifier() or self._parse_primary_or_var() + ) self._match(TokenType.R_BRACE) - return self.expression(exp.Parameter, this=this, expression=expression) def _parse_placeholder(self) -> t.Optional[exp.Expression]: @@ -5551,6 +5550,7 @@ class Parser(metaclass=_Parser): parser = self.ALTER_PARSERS.get(self._prev.text.upper()) if self._prev else None if parser: actions = ensure_list(parser(self)) + options = self._parse_csv(self._parse_property) if not self._curr and actions: return self.expression( @@ -5559,6 +5559,7 @@ class Parser(metaclass=_Parser): exists=exists, actions=actions, only=only, + options=options, ) return self._parse_as_command(start) diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py index eab61052..004b2882 100644 --- a/sqlglot/tokens.py +++ b/sqlglot/tokens.py @@ -1187,7 +1187,7 @@ class Tokenizer(metaclass=_Tokenizer): self._advance() elif self._peek == "." and not decimal: after = self.peek(1) - if after.isdigit() or not after.isalpha(): + if after.isdigit() or not (after.isalpha() or after == "_"): decimal = True self._advance() else: diff --git a/sqlglotrs/src/tokenizer.rs b/sqlglotrs/src/tokenizer.rs index 94a8b084..927f3d80 100644 --- a/sqlglotrs/src/tokenizer.rs +++ b/sqlglotrs/src/tokenizer.rs @@ -470,7 +470,7 @@ impl<'a> TokenizerState<'a> { self.advance(1)?; } else if self.peek_char == '.' && !decimal { let after = self.peek(1)?; - if after.is_digit(10) || !after.is_alphabetic() { + if after.is_digit(10) || !(after.is_alphabetic() || after == '_') { decimal = true; self.advance(1)?; } else {
tobymao/sqlglot
c9eef99b8fe3367c22a8186fb397ad550ac11386
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py index 38c19a77..db8da304 100644 --- a/tests/dialects/test_bigquery.py +++ b/tests/dialects/test_bigquery.py @@ -7,6 +7,7 @@ from sqlglot import ( UnsupportedError, exp, parse, + parse_one, transpile, ) from sqlglot.helper import logger as helper_logger @@ -40,6 +41,11 @@ class TestBigQuery(Validator): }, ) + table = parse_one("x-0._y.z", dialect="bigquery", into=exp.Table) + self.assertEqual(table.catalog, "x-0") + self.assertEqual(table.db, "_y") + self.assertEqual(table.name, "z") + self.validate_identity("SELECT * FROM x-0.y") self.assertEqual(exp.to_table("`x.y.z`", dialect="bigquery").sql(), '"x"."y"."z"') self.assertEqual(exp.to_table("`x.y.z`", dialect="bigquery").sql("bigquery"), "`x.y.z`") diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py index fd27a1ee..5f23c440 100644 --- a/tests/dialects/test_mysql.py +++ b/tests/dialects/test_mysql.py @@ -29,6 +29,7 @@ class TestMySQL(Validator): self.validate_identity("CREATE TABLE foo (a BIGINT, INDEX USING BTREE (b))") self.validate_identity("CREATE TABLE foo (a BIGINT, FULLTEXT INDEX (b))") self.validate_identity("CREATE TABLE foo (a BIGINT, SPATIAL INDEX (b))") + self.validate_identity("ALTER TABLE t1 ADD COLUMN x INT, ALGORITHM=INPLACE, LOCK=EXCLUSIVE") self.validate_identity( "CREATE TABLE `oauth_consumer` (`key` VARCHAR(32) NOT NULL, UNIQUE `OAUTH_CONSUMER_KEY` (`key`))" ) @@ -68,6 +69,26 @@ class TestMySQL(Validator): self.validate_identity( "CREATE OR REPLACE VIEW my_view AS SELECT column1 AS `boo`, column2 AS `foo` FROM my_table WHERE column3 = 'some_value' UNION SELECT q.* FROM fruits_table, JSON_TABLE(Fruits, '$[*]' COLUMNS(id VARCHAR(255) PATH '$.$id', value VARCHAR(255) PATH '$.value')) AS q", ) + self.validate_identity( + "CREATE TABLE `foo` (`id` char(36) NOT NULL DEFAULT (uuid()), PRIMARY KEY (`id`), UNIQUE KEY `id` (`id`))", + "CREATE TABLE `foo` (`id` CHAR(36) NOT NULL DEFAULT (UUID()), PRIMARY KEY (`id`), UNIQUE `id` (`id`))", + ) + self.validate_identity( + "CREATE TABLE IF NOT EXISTS industry_info (a BIGINT(20) NOT NULL AUTO_INCREMENT, b BIGINT(20) NOT NULL, c VARCHAR(1000), PRIMARY KEY (a), UNIQUE KEY d (b), KEY e (b))", + "CREATE TABLE IF NOT EXISTS industry_info (a BIGINT(20) NOT NULL AUTO_INCREMENT, b BIGINT(20) NOT NULL, c VARCHAR(1000), PRIMARY KEY (a), UNIQUE d (b), INDEX e (b))", + ) + self.validate_identity( + "CREATE TABLE test (ts TIMESTAMP, ts_tz TIMESTAMPTZ, ts_ltz TIMESTAMPLTZ)", + "CREATE TABLE test (ts DATETIME, ts_tz TIMESTAMP, ts_ltz TIMESTAMP)", + ) + self.validate_identity( + "ALTER TABLE test_table ALTER COLUMN test_column SET DATA TYPE LONGTEXT", + "ALTER TABLE test_table MODIFY COLUMN test_column LONGTEXT", + ) + self.validate_identity( + "CREATE TABLE t (c DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP) DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC", + "CREATE TABLE t (c DATETIME DEFAULT CURRENT_TIMESTAMP() ON UPDATE CURRENT_TIMESTAMP()) DEFAULT CHARACTER SET=utf8 ROW_FORMAT=DYNAMIC", + ) self.validate_all( "CREATE TABLE z (a INT) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARACTER SET=utf8 COLLATE=utf8_bin COMMENT='x'", @@ -78,12 +99,6 @@ class TestMySQL(Validator): "sqlite": "CREATE TABLE z (a INTEGER)", }, ) - self.validate_all( - "CREATE TABLE t (c DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP) DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC", - write={ - "mysql": "CREATE TABLE t (c DATETIME DEFAULT CURRENT_TIMESTAMP() ON UPDATE CURRENT_TIMESTAMP()) DEFAULT CHARACTER SET=utf8 ROW_FORMAT=DYNAMIC", - }, - ) self.validate_all( "CREATE TABLE x (id int not null auto_increment, primary key (id))", write={ @@ -96,33 +111,9 @@ class TestMySQL(Validator): "sqlite": "CREATE TABLE x (id INTEGER NOT NULL)", }, ) - self.validate_all( - "CREATE TABLE `foo` (`id` char(36) NOT NULL DEFAULT (uuid()), PRIMARY KEY (`id`), UNIQUE KEY `id` (`id`))", - write={ - "mysql": "CREATE TABLE `foo` (`id` CHAR(36) NOT NULL DEFAULT (UUID()), PRIMARY KEY (`id`), UNIQUE `id` (`id`))", - }, - ) - self.validate_all( - "CREATE TABLE IF NOT EXISTS industry_info (a BIGINT(20) NOT NULL AUTO_INCREMENT, b BIGINT(20) NOT NULL, c VARCHAR(1000), PRIMARY KEY (a), UNIQUE KEY d (b), KEY e (b))", - write={ - "mysql": "CREATE TABLE IF NOT EXISTS industry_info (a BIGINT(20) NOT NULL AUTO_INCREMENT, b BIGINT(20) NOT NULL, c VARCHAR(1000), PRIMARY KEY (a), UNIQUE d (b), INDEX e (b))", - }, - ) - self.validate_all( - "CREATE TABLE test (ts TIMESTAMP, ts_tz TIMESTAMPTZ, ts_ltz TIMESTAMPLTZ)", - write={ - "mysql": "CREATE TABLE test (ts DATETIME, ts_tz TIMESTAMP, ts_ltz TIMESTAMP)", - }, - ) - self.validate_all( - "ALTER TABLE test_table ALTER COLUMN test_column SET DATA TYPE LONGTEXT", - write={ - "mysql": "ALTER TABLE test_table MODIFY COLUMN test_column LONGTEXT", - }, - ) - self.validate_identity("ALTER TABLE test_table ALTER COLUMN test_column SET DEFAULT 1") def test_identity(self): + self.validate_identity("ALTER TABLE test_table ALTER COLUMN test_column SET DEFAULT 1") self.validate_identity("SELECT DATE_FORMAT(NOW(), '%Y-%m-%d %H:%i:00.0000')") self.validate_identity("SELECT @var1 := 1, @var2") self.validate_identity("UNLOCK TABLES")
Online DDL in MySQL Could support for MySQL's online DDL please be added to sqlglot? That is, DDL operations should support `ALGORITHM` and `LOCK` clauses. See, e.g., https://dev.mysql.com/doc/refman/8.0/en/innodb-online-ddl.html Here's an example of what I tried to do; the last step is what I'd like to see improved. ``` >>> import sys; sys.version_info sys.version_info(major=3, minor=11, micro=7, releaselevel='final', serial=0) >>> import sqlglot; sqlglot._version.version_tuple (21, 1, 2) >>> sqlglot.parse_one("ALTER TABLE t1 ADD COLUMN x INT;", dialect="mysql") AlterTable( this=Table( this=Identifier(this=t1, quoted=False)), actions=[ ColumnDef( this=Identifier(this=x, quoted=False), kind=DataType(this=Type.INT, nested=False))]) >>> sqlglot.parse_one("ALTER TABLE t1 ADD COLUMN x INT, ALGORITHM=INPLACE, LOCK=EXCLUSIVE;", dialect="mysql") 'ALTER TABLE t1 ADD COLUMN x INT, ALGORITHM=INPLACE, LOCK=EXCLUSIVE' contains unsupported syntax. Falling back to parsing as a 'Command'. Command(this=ALTER, expression=TABLE t1 ADD COLUMN x INT, ALGORITHM=INPLACE, LOCK=EXCLUSIVE) ``` I'll take a look at the code myself, but I suspect it will take me some time to understand how sqlglot's modules are interrelated, and the authors can probably do it faster and better. Thanks!
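For reference, a minimal sketch of the behavior the patch and tests above encode, assuming a sqlglot build that includes this change (the statement should then parse as an `AlterTable` rather than falling back to a `Command`):

```python
import sqlglot

sql = "ALTER TABLE t1 ADD COLUMN x INT, ALGORITHM=INPLACE, LOCK=EXCLUSIVE"
# With the fix, ALGORITHM/LOCK land in the new "options" arg of AlterTable
# and the statement round-trips unchanged, per the validate_identity test.
print(sqlglot.parse_one(sql, dialect="mysql").sql(dialect="mysql"))
```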
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery" ]
[ "tests/dialects/test_bigquery.py::TestBigQuery::test_errors", "tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat", "tests/dialects/test_bigquery.py::TestBigQuery::test_json_object", "tests/dialects/test_bigquery.py::TestBigQuery::test_merge", "tests/dialects/test_bigquery.py::TestBigQuery::test_models", "tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names", "tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types", "tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table", "tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions", "tests/dialects/test_bigquery.py::TestBigQuery::test_warnings", "tests/dialects/test_mysql.py::TestMySQL::test_bits_literal", "tests/dialects/test_mysql.py::TestMySQL::test_canonical_functions", "tests/dialects/test_mysql.py::TestMySQL::test_convert", "tests/dialects/test_mysql.py::TestMySQL::test_date_format", "tests/dialects/test_mysql.py::TestMySQL::test_ddl", "tests/dialects/test_mysql.py::TestMySQL::test_escape", "tests/dialects/test_mysql.py::TestMySQL::test_hexadecimal_literal", "tests/dialects/test_mysql.py::TestMySQL::test_identity", "tests/dialects/test_mysql.py::TestMySQL::test_introducers", "tests/dialects/test_mysql.py::TestMySQL::test_is_null", "tests/dialects/test_mysql.py::TestMySQL::test_json_object", "tests/dialects/test_mysql.py::TestMySQL::test_match_against", "tests/dialects/test_mysql.py::TestMySQL::test_monthname", "tests/dialects/test_mysql.py::TestMySQL::test_mysql", "tests/dialects/test_mysql.py::TestMySQL::test_mysql_time", "tests/dialects/test_mysql.py::TestMySQL::test_safe_div", "tests/dialects/test_mysql.py::TestMySQL::test_set_variable", "tests/dialects/test_mysql.py::TestMySQL::test_show_columns", "tests/dialects/test_mysql.py::TestMySQL::test_show_db_like_or_where_sql", "tests/dialects/test_mysql.py::TestMySQL::test_show_engine", "tests/dialects/test_mysql.py::TestMySQL::test_show_errors", "tests/dialects/test_mysql.py::TestMySQL::test_show_events", "tests/dialects/test_mysql.py::TestMySQL::test_show_grants", "tests/dialects/test_mysql.py::TestMySQL::test_show_index", "tests/dialects/test_mysql.py::TestMySQL::test_show_like_or_where", "tests/dialects/test_mysql.py::TestMySQL::test_show_name", "tests/dialects/test_mysql.py::TestMySQL::test_show_processlist", "tests/dialects/test_mysql.py::TestMySQL::test_show_profile", "tests/dialects/test_mysql.py::TestMySQL::test_show_replica_status", "tests/dialects/test_mysql.py::TestMySQL::test_show_simple", "tests/dialects/test_mysql.py::TestMySQL::test_show_tables", "tests/dialects/test_mysql.py::TestMySQL::test_string_literals", "tests/dialects/test_mysql.py::TestMySQL::test_types" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-02-26T14:20:22Z"
mit
tobymao__sqlglot-3045
diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py index fcb3aab9..c3888a3b 100644 --- a/sqlglot/dialects/oracle.py +++ b/sqlglot/dialects/oracle.py @@ -93,6 +93,14 @@ class Oracle(Dialect): "XMLTABLE": lambda self: self._parse_xml_table(), } + PROPERTY_PARSERS = { + **parser.Parser.PROPERTY_PARSERS, + "GLOBAL": lambda self: self._match_text_seq("TEMPORARY") + and self.expression(exp.TemporaryProperty, this="GLOBAL"), + "PRIVATE": lambda self: self._match_text_seq("TEMPORARY") + and self.expression(exp.TemporaryProperty, this="PRIVATE"), + } + QUERY_MODIFIER_PARSERS = { **parser.Parser.QUERY_MODIFIER_PARSERS, TokenType.ORDER_SIBLINGS_BY: lambda self: ("order", self._parse_order()), @@ -207,6 +215,7 @@ class Oracle(Dialect): exp.Substring: rename_func("SUBSTR"), exp.Table: lambda self, e: self.table_sql(e, sep=" "), exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "), + exp.TemporaryProperty: lambda _, e: f"{e.name or 'GLOBAL'} TEMPORARY", exp.TimeToStr: lambda self, e: self.func("TO_CHAR", e.this, self.format_time(e)), exp.ToChar: lambda self, e: self.function_fallback_sql(e), exp.Trim: trim_sql, diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index ee4cdde5..bfffe31d 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -2463,7 +2463,7 @@ class StabilityProperty(Property): class TemporaryProperty(Property): - arg_types = {} + arg_types = {"this": False} class TransformModelProperty(Property): diff --git a/sqlglot/generator.py b/sqlglot/generator.py index 66466734..e8307dfa 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -73,7 +73,7 @@ class Generator(metaclass=_Generator): TRANSFORMS: t.Dict[t.Type[exp.Expression], t.Callable[..., str]] = { **JSON_PATH_PART_TRANSFORMS, exp.AutoRefreshProperty: lambda self, e: f"AUTO REFRESH {self.sql(e, 'this')}", - exp.CaseSpecificColumnConstraint: lambda self, + exp.CaseSpecificColumnConstraint: lambda _, e: f"{'NOT ' if e.args.get('not_') else ''}CASESPECIFIC", exp.CharacterSetColumnConstraint: lambda self, e: f"CHARACTER SET {self.sql(e, 'this')}", exp.CharacterSetProperty: lambda self, e: f"CHARACTER SET={self.sql(e, 'this')}", exp.ClusteredColumnConstraint: lambda self, e: f"CLUSTERED ({self.expressions(e, 'this', indent=False)})", exp.CollateColumnConstraint: lambda self, e: f"COLLATE {self.sql(e, 'this')}", exp.CommentColumnConstraint: lambda self, e: f"COMMENT {self.sql(e, 'this')}", - exp.CopyGrantsProperty: lambda self, e: "COPY GRANTS", + exp.CopyGrantsProperty: lambda *_: "COPY GRANTS", exp.DateAdd: lambda self, e: self.func( "DATE_ADD", e.this, e.expression, exp.Literal.string(e.text("unit")) ), @@ -90,8 +90,8 @@ class Generator(metaclass=_Generator): exp.DefaultColumnConstraint: lambda self, e: f"DEFAULT {self.sql(e, 'this')}", exp.EncodeColumnConstraint: lambda self, e: f"ENCODE {self.sql(e, 'this')}", exp.ExecuteAsProperty: lambda self, e: self.naked_property(e), - exp.ExternalProperty: lambda self, e: "EXTERNAL", - exp.HeapProperty: lambda self, e: "HEAP", + exp.ExternalProperty: lambda *_: "EXTERNAL", + exp.HeapProperty: lambda *_: "HEAP", exp.InheritsProperty: lambda self, e: f"INHERITS ({self.expressions(e, flat=True)})", exp.InlineLengthColumnConstraint: lambda self, e: f"INLINE LENGTH {self.sql(e, 'this')}", exp.InputModelProperty: lambda self, e: f"INPUT{self.sql(e, 'this')}", @@ -104,13 +104,13 @@ class Generator(metaclass=_Generator): ), exp.LanguageProperty: lambda self, e: self.naked_property(e), exp.LocationProperty: lambda self, e: self.naked_property(e), - exp.LogProperty: lambda self, e: f"{'NO ' if e.args.get('no') else ''}LOG", - exp.MaterializedProperty: lambda self, e: "MATERIALIZED", + exp.LogProperty: lambda _, e: f"{'NO ' if e.args.get('no') else ''}LOG", + exp.MaterializedProperty: lambda *_: "MATERIALIZED", exp.NonClusteredColumnConstraint: lambda self, e: f"NONCLUSTERED ({self.expressions(e, 'this', indent=False)})", - exp.NoPrimaryIndexProperty: lambda self, e: "NO PRIMARY INDEX", - exp.NotForReplicationColumnConstraint: lambda self, e: "NOT FOR REPLICATION", - exp.OnCommitProperty: lambda self, + exp.NoPrimaryIndexProperty: lambda *_: "NO PRIMARY INDEX", + exp.NotForReplicationColumnConstraint: lambda *_: "NOT FOR REPLICATION", + exp.OnCommitProperty: lambda _, e: f"ON COMMIT {'DELETE' if e.args.get('delete') else 'PRESERVE'} ROWS", exp.OnProperty: lambda self, e: f"ON {self.sql(e, 'this')}", exp.OnUpdateColumnConstraint: lambda self, e: f"ON UPDATE {self.sql(e, 'this')}", @@ -121,21 +121,21 @@ class Generator(metaclass=_Generator): exp.ReturnsProperty: lambda self, e: self.naked_property(e), exp.SampleProperty: lambda self, e: f"SAMPLE BY {self.sql(e, 'this')}", exp.SetConfigProperty: lambda self, e: self.sql(e, "this"), - exp.SetProperty: lambda self, e: f"{'MULTI' if e.args.get('multi') else ''}SET", + exp.SetProperty: lambda _, e: f"{'MULTI' if e.args.get('multi') else ''}SET", exp.SettingsProperty: lambda self, e: f"SETTINGS{self.seg('')}{(self.expressions(e))}", - exp.SqlReadWriteProperty: lambda self, e: e.name, - exp.SqlSecurityProperty: lambda self, + exp.SqlReadWriteProperty: lambda _, e: e.name, + exp.SqlSecurityProperty: lambda _, e: f"SQL SECURITY {'DEFINER' if e.args.get('definer') else 'INVOKER'}", - exp.StabilityProperty: lambda self, e: e.name, - exp.TemporaryProperty: lambda self, e: "TEMPORARY", + exp.StabilityProperty: lambda _, e: e.name, + exp.TemporaryProperty: lambda *_: "TEMPORARY", exp.TitleColumnConstraint: lambda self, e: f"TITLE {self.sql(e, 'this')}", exp.Timestamp: lambda self, e: self.func("TIMESTAMP", e.this, e.expression), exp.ToTableProperty: lambda self, e: f"TO {self.sql(e.this)}", exp.TransformModelProperty: lambda self, e: self.func("TRANSFORM", *e.expressions), - exp.TransientProperty: lambda self, e: "TRANSIENT", - exp.UppercaseColumnConstraint: lambda self, e: "UPPERCASE", + exp.TransientProperty: lambda *_: "TRANSIENT", + exp.UppercaseColumnConstraint: lambda *_: "UPPERCASE", exp.VarMap: lambda self, e: self.func("MAP", e.args["keys"], e.args["values"]), - exp.VolatileProperty: lambda self, e: "VOLATILE", + exp.VolatileProperty: lambda *_: "VOLATILE", exp.WithJournalTableProperty: lambda self, e: f"WITH JOURNAL TABLE={self.sql(e, 'this')}", }
tobymao/sqlglot
7b2cff84f9a544435aa22954536eb7c9c2632816
diff --git a/tests/dialects/test_oracle.py b/tests/dialects/test_oracle.py index d49f7e91..9028b031 100644 --- a/tests/dialects/test_oracle.py +++ b/tests/dialects/test_oracle.py @@ -1,4 +1,4 @@ -from sqlglot import exp, parse_one +from sqlglot import exp from sqlglot.errors import UnsupportedError from tests.dialects.test_dialect import Validator @@ -7,11 +7,11 @@ class TestOracle(Validator): dialect = "oracle" def test_oracle(self): - self.validate_identity("REGEXP_REPLACE('source', 'search')") - parse_one("ALTER TABLE tbl_name DROP FOREIGN KEY fk_symbol", dialect="oracle").assert_is( - exp.AlterTable - ) + self.parse_one("ALTER TABLE tbl_name DROP FOREIGN KEY fk_symbol").assert_is(exp.AlterTable) + self.validate_identity("CREATE GLOBAL TEMPORARY TABLE t AS SELECT * FROM orders") + self.validate_identity("CREATE PRIVATE TEMPORARY TABLE t AS SELECT * FROM orders") + self.validate_identity("REGEXP_REPLACE('source', 'search')") self.validate_identity("TIMESTAMP(3) WITH TIME ZONE") self.validate_identity("CURRENT_TIMESTAMP(precision)") self.validate_identity("ALTER TABLE tbl_name DROP FOREIGN KEY fk_symbol") diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py index a615c190..a0df87f0 100644 --- a/tests/dialects/test_tsql.py +++ b/tests/dialects/test_tsql.py @@ -144,7 +144,7 @@ class TestTSQL(Validator): "tsql": "CREATE TABLE #mytemptable (a INTEGER)", "snowflake": "CREATE TEMPORARY TABLE mytemptable (a INT)", "duckdb": "CREATE TEMPORARY TABLE mytemptable (a INT)", - "oracle": "CREATE TEMPORARY TABLE mytemptable (a NUMBER)", + "oracle": "CREATE GLOBAL TEMPORARY TABLE mytemptable (a NUMBER)", "hive": "CREATE TEMPORARY TABLE mytemptable (a INT)", "spark2": "CREATE TEMPORARY TABLE mytemptable (a INT) USING PARQUET", "spark": "CREATE TEMPORARY TABLE mytemptable (a INT) USING PARQUET",
feat: support for temporary tables in Oracle dialect **Is your feature request related to a problem? Please describe.** Oracle requires either the `PRIVATE` or the `GLOBAL` keyword when creating a temporary table -- the syntax `CREATE TEMPORARY TABLE` is invalid; it should be either `CREATE PRIVATE TEMPORARY TABLE` or `CREATE GLOBAL TEMPORARY TABLE`. **Describe the solution you'd like** The `PRIVATE` temp tables have a bunch of restrictions and seem unlike most other temporary tables to me -- I think adding `GLOBAL` to the Oracle dialect would get most users where they're likely to want to go. **Describe alternatives you've considered** I've got a heinous hack in Ibis where I intercept the output of `self.create_table` and then do a string replace to add in `GLOBAL`; that works fine, but it's brittle and doesn't help anyone else. **Additional context** https://docs.oracle.com/en/database/oracle/oracle-database/23/sqlrf/CREATE-TABLE.html#GUID-F9CE0CC3-13AE-4744-A43C-EAC7A71AAAB6
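A minimal sketch of the behavior the updated tests encode, assuming a build with this patch applied; the T-SQL source statement is taken from the modified test above:

```python
import sqlglot

# Per the updated T-SQL test, a temp table now transpiles to Oracle's
# GLOBAL TEMPORARY form instead of the invalid bare TEMPORARY.
sql = sqlglot.transpile("CREATE TABLE #mytemptable (a INTEGER)", read="tsql", write="oracle")[0]
print(sql)  # CREATE GLOBAL TEMPORARY TABLE mytemptable (a NUMBER)
```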
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_tsql.py::TestTSQL::test_tsql" ]
[ "tests/dialects/test_oracle.py::TestOracle::test_connect_by", "tests/dialects/test_oracle.py::TestOracle::test_hints", "tests/dialects/test_oracle.py::TestOracle::test_join_marker", "tests/dialects/test_oracle.py::TestOracle::test_json_table", "tests/dialects/test_oracle.py::TestOracle::test_match_recognize", "tests/dialects/test_oracle.py::TestOracle::test_oracle", "tests/dialects/test_oracle.py::TestOracle::test_xml_table", "tests/dialects/test_tsql.py::TestTSQL::test__types_ints", "tests/dialects/test_tsql.py::TestTSQL::test_add_date", "tests/dialects/test_tsql.py::TestTSQL::test_charindex", "tests/dialects/test_tsql.py::TestTSQL::test_commit", "tests/dialects/test_tsql.py::TestTSQL::test_convert_date_format", "tests/dialects/test_tsql.py::TestTSQL::test_current_user", "tests/dialects/test_tsql.py::TestTSQL::test_date_diff", "tests/dialects/test_tsql.py::TestTSQL::test_datefromparts", "tests/dialects/test_tsql.py::TestTSQL::test_datename", "tests/dialects/test_tsql.py::TestTSQL::test_datepart", "tests/dialects/test_tsql.py::TestTSQL::test_ddl", "tests/dialects/test_tsql.py::TestTSQL::test_eomonth", "tests/dialects/test_tsql.py::TestTSQL::test_format", "tests/dialects/test_tsql.py::TestTSQL::test_fullproc", "tests/dialects/test_tsql.py::TestTSQL::test_hints", "tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes", "tests/dialects/test_tsql.py::TestTSQL::test_insert_cte", "tests/dialects/test_tsql.py::TestTSQL::test_isnull", "tests/dialects/test_tsql.py::TestTSQL::test_json", "tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery", "tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function", "tests/dialects/test_tsql.py::TestTSQL::test_len", "tests/dialects/test_tsql.py::TestTSQL::test_openjson", "tests/dialects/test_tsql.py::TestTSQL::test_option", "tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords", "tests/dialects/test_tsql.py::TestTSQL::test_qualify_derived_table_outputs", "tests/dialects/test_tsql.py::TestTSQL::test_replicate", "tests/dialects/test_tsql.py::TestTSQL::test_rollback", "tests/dialects/test_tsql.py::TestTSQL::test_set", "tests/dialects/test_tsql.py::TestTSQL::test_string", "tests/dialects/test_tsql.py::TestTSQL::test_system_time", "tests/dialects/test_tsql.py::TestTSQL::test_temp_table", "tests/dialects/test_tsql.py::TestTSQL::test_temporal_table", "tests/dialects/test_tsql.py::TestTSQL::test_top", "tests/dialects/test_tsql.py::TestTSQL::test_transaction", "tests/dialects/test_tsql.py::TestTSQL::test_types", "tests/dialects/test_tsql.py::TestTSQL::test_types_bin", "tests/dialects/test_tsql.py::TestTSQL::test_types_date", "tests/dialects/test_tsql.py::TestTSQL::test_types_decimals", "tests/dialects/test_tsql.py::TestTSQL::test_types_string", "tests/dialects/test_tsql.py::TestTSQL::test_udf" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-02-28T13:44:51Z"
mit
tobymao__sqlglot-3073
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index 1a248750..b3c63460 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -1605,7 +1605,7 @@ class TitleColumnConstraint(ColumnConstraintKind): class UniqueColumnConstraint(ColumnConstraintKind): - arg_types = {"this": False, "index_type": False} + arg_types = {"this": False, "index_type": False, "on_conflict": False} class UppercaseColumnConstraint(ColumnConstraintKind): @@ -1883,8 +1883,8 @@ class OnConflict(Expression): arg_types = { "duplicate": False, "expressions": False, - "nothing": False, - "key": False, + "action": False, + "conflict_keys": False, "constraint": False, } diff --git a/sqlglot/generator.py b/sqlglot/generator.py index e6f5c4b0..753d4391 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -869,7 +869,9 @@ class Generator(metaclass=_Generator): this = f" {this}" if this else "" index_type = expression.args.get("index_type") index_type = f" USING {index_type}" if index_type else "" - return f"UNIQUE{this}{index_type}" + on_conflict = self.sql(expression, "on_conflict") + on_conflict = f" {on_conflict}" if on_conflict else "" + return f"UNIQUE{this}{index_type}{on_conflict}" def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str: return self.sql(expression, "this") @@ -1457,14 +1459,15 @@ class Generator(metaclass=_Generator): where = self.sql(expression, "where") where = f"{self.sep()}REPLACE WHERE {where}" if where else "" expression_sql = f"{self.sep()}{self.sql(expression, 'expression')}" - conflict = self.sql(expression, "conflict") + on_conflict = self.sql(expression, "conflict") + on_conflict = f" {on_conflict}" if on_conflict else "" by_name = " BY NAME" if expression.args.get("by_name") else "" returning = self.sql(expression, "returning") if self.RETURNING_END: - expression_sql = f"{expression_sql}{conflict}{returning}" + expression_sql = f"{expression_sql}{on_conflict}{returning}" else: - expression_sql = f"{returning}{expression_sql}{conflict}" + expression_sql = f"{returning}{expression_sql}{on_conflict}" sql = f"INSERT{alternative}{ignore}{this}{by_name}{exists}{partition_sql}{where}{expression_sql}" return self.prepend_ctes(expression, sql) @@ -1496,17 +1499,20 @@ class Generator(metaclass=_Generator): def onconflict_sql(self, expression: exp.OnConflict) -> str: conflict = "ON DUPLICATE KEY" if expression.args.get("duplicate") else "ON CONFLICT" + constraint = self.sql(expression, "constraint") - if constraint: - constraint = f"ON CONSTRAINT {constraint}" - key = self.expressions(expression, key="key", flat=True) - do = "" if expression.args.get("duplicate") else " DO " - nothing = "NOTHING" if expression.args.get("nothing") else "" + constraint = f" ON CONSTRAINT {constraint}" if constraint else "" + + conflict_keys = self.expressions(expression, key="conflict_keys", flat=True) + conflict_keys = f"({conflict_keys}) " if conflict_keys else " " + action = self.sql(expression, "action") + expressions = self.expressions(expression, flat=True) - set_keyword = "SET " if self.DUPLICATE_KEY_UPDATE_WITH_SET else "" if expressions: - expressions = f"UPDATE {set_keyword}{expressions}" - return f"{self.seg(conflict)} {constraint}{key}{do}{nothing}{expressions}" + set_keyword = "SET " if self.DUPLICATE_KEY_UPDATE_WITH_SET else "" + expressions = f" {set_keyword}{expressions}" + + return f"{conflict}{constraint}{conflict_keys}{action}{expressions}" def returning_sql(self, expression: exp.Returning) -> str: return f"{self.seg('RETURNING')} {self.expressions(expression, flat=True)}" diff --git a/sqlglot/parser.py b/sqlglot/parser.py index 49dac2ea..ad2907b8 100644 --- a/sqlglot/parser.py +++ b/sqlglot/parser.py @@ -966,6 +966,11 @@ class Parser(metaclass=_Parser): "READ": ("WRITE", "ONLY"), } + CONFLICT_ACTIONS: OPTIONS_TYPE = dict.fromkeys( + ("ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK", "UPDATE"), tuple() + ) + CONFLICT_ACTIONS["DO"] = ("NOTHING", "UPDATE") + USABLES: OPTIONS_TYPE = dict.fromkeys(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA"), tuple()) INSERT_ALTERNATIVES = {"ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"} @@ -2112,31 +2117,31 @@ class Parser(metaclass=_Parser): if not conflict and not duplicate: return None - nothing = None - expressions = None - key = None + conflict_keys = None constraint = None if conflict: if self._match_text_seq("ON", "CONSTRAINT"): constraint = self._parse_id_var() - else: - key = self._parse_csv(self._parse_value) + elif self._match(TokenType.L_PAREN): + conflict_keys = self._parse_csv(self._parse_id_var) + self._match_r_paren() - self._match_text_seq("DO") - if self._match_text_seq("NOTHING"): - nothing = True - else: - self._match(TokenType.UPDATE) + action = self._parse_var_from_options( + self.CONFLICT_ACTIONS, + ) + if self._prev.token_type == TokenType.UPDATE: self._match(TokenType.SET) expressions = self._parse_csv(self._parse_equality) + else: + expressions = None return self.expression( exp.OnConflict, duplicate=duplicate, expressions=expressions, - nothing=nothing, - key=key, + action=action, + conflict_keys=conflict_keys, constraint=constraint, ) @@ -4417,9 +4422,7 @@ class Parser(metaclass=_Parser): self._match_text_seq("LENGTH") return self.expression(exp.InlineLengthColumnConstraint, this=self._parse_bitwise()) - def _parse_not_constraint( - self, - ) -> t.Optional[exp.Expression]: + def _parse_not_constraint(self) -> t.Optional[exp.Expression]: if self._match_text_seq("NULL"): return self.expression(exp.NotNullColumnConstraint) if self._match_text_seq("CASESPECIFIC"): @@ -4447,16 +4450,21 @@ class Parser(metaclass=_Parser): if not self._match(TokenType.CONSTRAINT): return self._parse_unnamed_constraint(constraints=self.SCHEMA_UNNAMED_CONSTRAINTS) - this = self._parse_id_var() - expressions = [] + return self.expression( + exp.Constraint, + this=self._parse_id_var(), + expressions=self._parse_unnamed_constraints(), + ) + def _parse_unnamed_constraints(self) -> t.List[exp.Expression]: + constraints = [] while True: constraint = self._parse_unnamed_constraint() or self._parse_function() if not constraint: break - expressions.append(constraint) + constraints.append(constraint) - return self.expression(exp.Constraint, this=this, expressions=expressions) + return constraints def _parse_unnamed_constraint( self, constraints: t.Optional[t.Collection[str]] = None @@ -4478,6 +4486,7 @@ class Parser(metaclass=_Parser): exp.UniqueColumnConstraint, this=self._parse_schema(self._parse_id_var(any_token=False)), index_type=self._match(TokenType.USING) and self._advance_any() and self._prev.text, + on_conflict=self._parse_on_conflict(), ) def _parse_key_constraint_options(self) -> t.List[str]:
tobymao/sqlglot
223a4751f88809710872fa7d757d22d9eeeb4f40
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py index 1d0ea8bd..fe4e3533 100644 --- a/tests/dialects/test_postgres.py +++ b/tests/dialects/test_postgres.py @@ -691,13 +691,13 @@ class TestPostgres(Validator): "CREATE INDEX index_issues_on_title_trigram ON public.issues USING gin(title public.gin_trgm_ops)" ) self.validate_identity( - "INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT (id) DO NOTHING RETURNING *" + "INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT(id) DO NOTHING RETURNING *" ) self.validate_identity( - "INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT (id) DO UPDATE SET x.id = 1 RETURNING *" + "INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT(id) DO UPDATE SET x.id = 1 RETURNING *" ) self.validate_identity( - "INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT (id) DO UPDATE SET x.id = excluded.id RETURNING *" + "INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT(id) DO UPDATE SET x.id = excluded.id RETURNING *" ) self.validate_identity( "INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT ON CONSTRAINT pkey DO NOTHING RETURNING *" diff --git a/tests/dialects/test_sqlite.py b/tests/dialects/test_sqlite.py index 2421987b..e935c194 100644 --- a/tests/dialects/test_sqlite.py +++ b/tests/dialects/test_sqlite.py @@ -7,6 +7,10 @@ class TestSQLite(Validator): dialect = "sqlite" def test_ddl(self): + for conflict_action in ("ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"): + with self.subTest(f"ON CONFLICT {conflict_action}"): + self.validate_identity("CREATE TABLE a (b, c, UNIQUE (b, c) ON CONFLICT IGNORE)") + self.validate_identity("INSERT OR ABORT INTO foo (x, y) VALUES (1, 2)") self.validate_identity("INSERT OR FAIL INTO foo (x, y) VALUES (1, 2)") self.validate_identity("INSERT OR IGNORE INTO foo (x, y) VALUES (1, 2)")
SQLite ParseError: Unique Table Constraint - cannot parse ON CONFLICT clause The sqlglot version is 22.2.0. #### MVE ```python import sqlglot inputstring = """CREATE TABLE a ( b, c, UNIQUE (b, c) ON CONFLICT IGNORE );""" print(sqlglot.parse(inputstring, dialect='sqlite')) ``` #### Raises ``` sqlglot.errors.ParseError: Expecting ). Line 4, Col: 20. CREATE TABLE a ( b, c, UNIQUE (b, c) ON CONFLICT IGNORE ); ``` (**ON** is underlined in the terminal) #### Official Docs [SQLite Create Table Docs](https://www.sqlite.org/lang_createtable.html) Tested on the [official fiddle](https://sqlite.org/fiddle/) to make sure the SQL is a valid statement, using ```sqlite CREATE TABLE a ( b, c, UNIQUE (b, c) ON CONFLICT IGNORE ); INSERT INTO a(b,c) VALUES (1,1), (2,1), (1,1); SELECT * FROM a; ``` The output is two rows, as expected: (1,1), (2,1).
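With the patch above applied, the statement should round-trip instead of raising; a minimal sketch:

```python
import sqlglot

ddl = "CREATE TABLE a (b, c, UNIQUE (b, c) ON CONFLICT IGNORE)"
# The ON CONFLICT clause now attaches to the UNIQUE constraint
# (exp.UniqueColumnConstraint gains an "on_conflict" arg in the patch),
# so this parses and regenerates the same SQL, per the new test.
print(sqlglot.parse_one(ddl, dialect="sqlite").sql(dialect="sqlite"))
```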
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_postgres.py::TestPostgres::test_ddl", "tests/dialects/test_sqlite.py::TestSQLite::test_ddl" ]
[ "tests/dialects/test_postgres.py::TestPostgres::test_array_offset", "tests/dialects/test_postgres.py::TestPostgres::test_bool_or", "tests/dialects/test_postgres.py::TestPostgres::test_operator", "tests/dialects/test_postgres.py::TestPostgres::test_postgres", "tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary", "tests/dialects/test_postgres.py::TestPostgres::test_string_concat", "tests/dialects/test_postgres.py::TestPostgres::test_unnest", "tests/dialects/test_postgres.py::TestPostgres::test_unnest_json_array", "tests/dialects/test_postgres.py::TestPostgres::test_variance", "tests/dialects/test_sqlite.py::TestSQLite::test_datediff", "tests/dialects/test_sqlite.py::TestSQLite::test_hexadecimal_literal", "tests/dialects/test_sqlite.py::TestSQLite::test_longvarchar_dtype", "tests/dialects/test_sqlite.py::TestSQLite::test_sqlite", "tests/dialects/test_sqlite.py::TestSQLite::test_warnings", "tests/dialects/test_sqlite.py::TestSQLite::test_window_null_treatment" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-03-03T14:23:49Z"
mit
tobymao__sqlglot-3077
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index b3c63460..acf18ff1 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -1828,6 +1828,7 @@ class Index(Expression): class Insert(DDL, DML): arg_types = { + "hint": False, "with": False, "this": True, "expression": False, diff --git a/sqlglot/generator.py b/sqlglot/generator.py index 9e0f4da9..f0264197 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -1451,6 +1451,7 @@ class Generator(metaclass=_Generator): return f"{sql})" def insert_sql(self, expression: exp.Insert) -> str: + hint = self.sql(expression, "hint") overwrite = expression.args.get("overwrite") if isinstance(expression.this, exp.Directory): @@ -1481,7 +1482,7 @@ class Generator(metaclass=_Generator): else: expression_sql = f"{returning}{expression_sql}{on_conflict}" - sql = f"INSERT{alternative}{ignore}{this}{by_name}{exists}{partition_sql}{where}{expression_sql}" + sql = f"INSERT{hint}{alternative}{ignore}{this}{by_name}{exists}{partition_sql}{where}{expression_sql}" return self.prepend_ctes(expression, sql) def intersect_sql(self, expression: exp.Intersect) -> str: diff --git a/sqlglot/parser.py b/sqlglot/parser.py index 8da2eacd..8ea2dbab 100644 --- a/sqlglot/parser.py +++ b/sqlglot/parser.py @@ -2061,6 +2061,7 @@ class Parser(metaclass=_Parser): def _parse_insert(self) -> exp.Insert: comments = ensure_list(self._prev_comments) + hint = self._parse_hint() overwrite = self._match(TokenType.OVERWRITE) ignore = self._match(TokenType.IGNORE) local = self._match_text_seq("LOCAL") @@ -2087,6 +2088,7 @@ class Parser(metaclass=_Parser): return self.expression( exp.Insert, comments=comments, + hint=hint, this=this, by_name=self._match_text_seq("BY", "NAME"), exists=self._parse_exists(),
tobymao/sqlglot
4173ea29bbd8944896c259fe45209de69fcbdc46
diff --git a/tests/dialects/test_oracle.py b/tests/dialects/test_oracle.py index 9438507b..67e18460 100644 --- a/tests/dialects/test_oracle.py +++ b/tests/dialects/test_oracle.py @@ -210,6 +210,8 @@ class TestOracle(Validator): self.validate_identity( "SELECT /*+ LEADING(e j) */ * FROM employees e, departments d, job_history j WHERE e.department_id = d.department_id AND e.hire_date = j.start_date" ) + self.validate_identity("INSERT /*+ APPEND */ INTO IAP_TBL (id, col1) VALUES (2, 'test2')") + self.validate_identity("INSERT /*+ APPEND_VALUES */ INTO dest_table VALUES (i, 'Value')") def test_xml_table(self): self.validate_identity("XMLTABLE('x')")
Oracle - insert append - error parsing hint There is an error when parsing an Oracle INSERT with an APPEND hint. # SQL ```SQL -- Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production select banner from v$version; -- create table create table iap_tbl (id int, col1 varchar2(20 char)); / -- normal insert insert into iap_tbl (id, col1) values (1, 'test'); commit; / select * from iap_tbl; / -- insert with append hint insert /*+ append */ into iap_tbl (id, col1) values (2, 'test2'); commit; ``` # Python ```Python # pip show sqlglot #Name: sqlglot #Version: 22.2.0 from sqlglot import parse_one # fails: the /*+ append */ hint triggers a parse error sql_nok = "insert /*+ append */ into iap_tbl (id, col1) values (2, 'test2')" # parses fine when the + is removed, i.e. a normal comment rather than a hint sql_ok = "insert /* append */ into iap_tbl (id, col1) values (2, 'test2')" parse_one(sql_ok) parse_one(sql_nok) ```
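A minimal sketch of the fixed behavior, assuming a build that includes the patch above (the hint is stored in the new `hint` arg of `exp.Insert`):

```python
from sqlglot import parse_one

sql = "INSERT /*+ APPEND */ INTO IAP_TBL (id, col1) VALUES (2, 'test2')"
# With the fix, the optimizer hint is parsed and regenerated instead of
# raising, matching the validate_identity test above.
print(parse_one(sql, dialect="oracle").sql(dialect="oracle"))
```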
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_oracle.py::TestOracle::test_hints" ]
[ "tests/dialects/test_oracle.py::TestOracle::test_connect_by", "tests/dialects/test_oracle.py::TestOracle::test_join_marker", "tests/dialects/test_oracle.py::TestOracle::test_json_table", "tests/dialects/test_oracle.py::TestOracle::test_match_recognize", "tests/dialects/test_oracle.py::TestOracle::test_oracle", "tests/dialects/test_oracle.py::TestOracle::test_xml_table" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-03-04T14:50:07Z"
mit
tobymao__sqlglot-3089
diff --git a/sqlglot/dialects/__init__.py b/sqlglot/dialects/__init__.py index 276ad59c..29c65800 100644 --- a/sqlglot/dialects/__init__.py +++ b/sqlglot/dialects/__init__.py @@ -61,6 +61,7 @@ dialect implementations in order to understand how their various components can ---- """ +from sqlglot.dialects.athena import Athena from sqlglot.dialects.bigquery import BigQuery from sqlglot.dialects.clickhouse import ClickHouse from sqlglot.dialects.databricks import Databricks diff --git a/sqlglot/dialects/athena.py b/sqlglot/dialects/athena.py new file mode 100644 index 00000000..dc87d8dc --- /dev/null +++ b/sqlglot/dialects/athena.py @@ -0,0 +1,12 @@ +from __future__ import annotations + +from sqlglot.dialects.trino import Trino +from sqlglot.tokens import TokenType + + +class Athena(Trino): + class Parser(Trino.Parser): + STATEMENT_PARSERS = { + **Trino.Parser.STATEMENT_PARSERS, + TokenType.USING: lambda self: self._parse_as_command(self._prev), + } diff --git a/sqlglot/dialects/dialect.py b/sqlglot/dialects/dialect.py index f11c0da2..d2533ebc 100644 --- a/sqlglot/dialects/dialect.py +++ b/sqlglot/dialects/dialect.py @@ -31,6 +31,7 @@ class Dialects(str, Enum): DIALECT = "" + ATHENA = "athena" BIGQUERY = "bigquery" CLICKHOUSE = "clickhouse" DATABRICKS = "databricks"
tobymao/sqlglot
d898f559fac44789da08689e835619f978c05a3e
diff --git a/tests/dialects/test_athena.py b/tests/dialects/test_athena.py new file mode 100644 index 00000000..99e36f21 --- /dev/null +++ b/tests/dialects/test_athena.py @@ -0,0 +1,16 @@ +from tests.dialects.test_dialect import Validator + + +class TestAthena(Validator): + dialect = "athena" + maxDiff = None + + def test_athena(self): + self.validate_identity( + """USING EXTERNAL FUNCTION some_function(input VARBINARY) + RETURNS VARCHAR + LAMBDA 'some-name' + SELECT + some_function(1)""", + check_command_warning=True, + )
Support User-Defined Functions in the Athena Dialect It looks like sqlglot is not able to parse [AWS Athena's user-defined function syntax](https://docs.aws.amazon.com/athena/latest/ug/querying-udf.html): ```py from sqlglot import parse from sqlglot.dialects import Trino parse(""" USING EXTERNAL FUNCTION some_function(input VARBINARY) RETURNS VARCHAR LAMBDA 'some-name' SELECT some_function(1) """, dialect=Trino) ``` Exception: ``` sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 2, Col: 9. USING EXTERNAL FUNCTION some_function(input VARBINARY) RETURNS VARCHAR LAMBDA 'some-name' ``` We are using the `Trino` dialect since sqlglot does not have a dedicated one for Athena, as far as I understand, but Athena is based on Trino, so this dialect otherwise works perfectly for our codebase :slightly_smiling_face: Am I missing something? Does it need a dedicated dialect for Athena?
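A sketch of what the patch above enables, assuming a build that registers the new `athena` dialect (per the test, the statement is accepted as a `Command`, with a warning, rather than raising):

```python
from sqlglot import parse

# With the athena dialect, USING EXTERNAL FUNCTION no longer raises
# ParseError; it falls back to a Command and emits a warning instead.
parse(
    """USING EXTERNAL FUNCTION some_function(input VARBINARY)
    RETURNS VARCHAR
    LAMBDA 'some-name'
    SELECT some_function(1)""",
    dialect="athena",
)
```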
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_athena.py::TestAthena::test_athena" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2024-03-06T16:35:36Z"
mit
tobymao__sqlglot-3092
diff --git a/sqlglot/dataframe/sql/dataframe.py b/sqlglot/dataframe/sql/dataframe.py index 0bacbf90..88295749 100644 --- a/sqlglot/dataframe/sql/dataframe.py +++ b/sqlglot/dataframe/sql/dataframe.py @@ -18,8 +18,6 @@ from sqlglot.dataframe.sql.transforms import replace_id_value from sqlglot.dataframe.sql.util import get_tables_from_expression_with_join from sqlglot.dataframe.sql.window import Window from sqlglot.helper import ensure_list, object_to_dict, seq_get -from sqlglot.optimizer import optimize as optimize_func -from sqlglot.optimizer.qualify_columns import quote_identifiers if t.TYPE_CHECKING: from sqlglot.dataframe.sql._typing import ( @@ -308,9 +306,8 @@ class DataFrame: for expression_type, select_expression in select_expressions: select_expression = select_expression.transform(replace_id_value, replacement_mapping) if optimize: - quote_identifiers(select_expression, dialect=dialect) select_expression = t.cast( - exp.Select, optimize_func(select_expression, dialect=dialect) + exp.Select, self.spark._optimize(select_expression, dialect=dialect) ) select_expression = df._replace_cte_names_with_hashes(select_expression) diff --git a/sqlglot/dataframe/sql/session.py b/sqlglot/dataframe/sql/session.py index bfc022bd..4e47aaa9 100644 --- a/sqlglot/dataframe/sql/session.py +++ b/sqlglot/dataframe/sql/session.py @@ -12,6 +12,8 @@ from sqlglot.dataframe.sql.readwriter import DataFrameReader from sqlglot.dataframe.sql.types import StructType from sqlglot.dataframe.sql.util import get_column_mapping_from_schema_input from sqlglot.helper import classproperty +from sqlglot.optimizer import optimize +from sqlglot.optimizer.qualify_columns import quote_identifiers if t.TYPE_CHECKING: from sqlglot.dataframe.sql._typing import ColumnLiterals, SchemaInput @@ -104,8 +106,15 @@ class SparkSession: sel_expression = exp.Select(**select_kwargs) return DataFrame(self, sel_expression) + def _optimize( + self, expression: exp.Expression, dialect: t.Optional[Dialect] = None + ) -> exp.Expression: + dialect = dialect or self.dialect + quote_identifiers(expression, dialect=dialect) + return optimize(expression, dialect=dialect) + def sql(self, sqlQuery: str) -> DataFrame: - expression = sqlglot.parse_one(sqlQuery, read=self.dialect) + expression = self._optimize(sqlglot.parse_one(sqlQuery, read=self.dialect)) if isinstance(expression, exp.Select): df = DataFrame(self, expression) df = df._convert_leaf_to_cte()
tobymao/sqlglot
21e4fca2b744a22981d8ff1696986061d3344d40
diff --git a/tests/dataframe/integration/test_session.py b/tests/dataframe/integration/test_session.py index ec500340..3bb3e204 100644 --- a/tests/dataframe/integration/test_session.py +++ b/tests/dataframe/integration/test_session.py @@ -34,3 +34,10 @@ class TestSessionFunc(DataFrameValidator): .agg(SF.countDistinct(SF.col("employee_id"))) ) self.compare_spark_with_sqlglot(df, dfs, skip_schema_compare=True) + + def test_nameless_column(self): + query = "SELECT MAX(age) FROM employee" + df = self.spark.sql(query) + dfs = self.sqlglot.sql(query) + # Spark will alias the column to `max(age)` while sqlglot will alias to `_col_0` so their schemas will differ + self.compare_spark_with_sqlglot(df, dfs, skip_schema_compare=True) diff --git a/tests/dataframe/unit/test_session.py b/tests/dataframe/unit/test_session.py index e2ebae42..848c6032 100644 --- a/tests/dataframe/unit/test_session.py +++ b/tests/dataframe/unit/test_session.py @@ -79,7 +79,7 @@ class TestDataframeSession(DataFrameSQLValidator): sqlglot.schema.add_table("table", {"cola": "string", "colb": "string"}, dialect="spark") df = self.spark.sql(query).groupBy(F.col("cola")).agg(F.sum("colb")) self.assertEqual( - "WITH t38189 AS (SELECT cola, colb FROM table), t42330 AS (SELECT cola, colb FROM t38189) SELECT cola, SUM(colb) FROM t42330 GROUP BY cola", + "WITH t26614 AS (SELECT `table`.`cola` AS `cola`, `table`.`colb` AS `colb` FROM `table` AS `table`), t23454 AS (SELECT cola, colb FROM t26614) SELECT cola, SUM(colb) FROM t23454 GROUP BY cola", df.sql(pretty=False, optimize=False)[0], ) @@ -87,14 +87,14 @@ class TestDataframeSession(DataFrameSQLValidator): query = "CREATE TABLE new_table AS WITH t1 AS (SELECT cola, colb FROM table) SELECT cola, colb, FROM t1" sqlglot.schema.add_table("table", {"cola": "string", "colb": "string"}, dialect="spark") df = self.spark.sql(query) - expected = "CREATE TABLE new_table AS SELECT `table`.`cola` AS `cola`, `table`.`colb` AS `colb` FROM `table` AS `table`" + expected = "CREATE TABLE `new_table` AS SELECT `table`.`cola` AS `cola`, `table`.`colb` AS `colb` FROM `table` AS `table`" self.compare_sql(df, expected) def test_sql_insert(self): query = "WITH t1 AS (SELECT cola, colb FROM table) INSERT INTO new_table SELECT cola, colb FROM t1" sqlglot.schema.add_table("table", {"cola": "string", "colb": "string"}, dialect="spark") df = self.spark.sql(query) - expected = "INSERT INTO new_table SELECT `table`.`cola` AS `cola`, `table`.`colb` AS `colb` FROM `table` AS `table`" + expected = "INSERT INTO `new_table` SELECT `table`.`cola` AS `cola`, `table`.`colb` AS `colb` FROM `table` AS `table`" self.compare_sql(df, expected) def test_session_create_builder_patterns(self):
Spark SQL SELECT MAX without column alias throws error **Fully reproducible code snippet** ```python spark = SparkSession.builder.config("sqlframe.dialect", "spark").getOrCreate() df = spark.sql(""" SELECT MAX(col) FROM (SELECT 1 as col) t """) ``` This throws `sqlglot.errors.ParseError: No expression was parsed from ''` because its `name` [here](https://github.com/tobymao/sqlglot/blob/main/sqlglot/dataframe/sql/dataframe.py#L173) is an empty string. This seems to be an issue with expressions that inherit from [Func](https://github.com/tobymao/sqlglot/blob/main/sqlglot/expressions.py#L4330) (MIN, MAX, ABS, etc.). Changing [Max](https://github.com/tobymao/sqlglot/blob/main/sqlglot/expressions.py#L5216) to inherit from [Condition](https://github.com/tobymao/sqlglot/blob/main/sqlglot/expressions.py#L903) directly fixes the issue. This isn't a problem for some more complex expressions that leverage multiple inheritance, like [DateAdd](https://github.com/tobymao/sqlglot/blob/main/sqlglot/expressions.py#L4700). **Official Documentation** https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select.html#parameters - aliases are optional in Spark SQL
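A minimal sketch of the fixed behavior, assuming a build with the patch above; the `employee`/`age` names come from the added integration test, and the schema registration mirrors the project's unit tests (it is only needed so the optimizer can qualify columns):

```python
import sqlglot
from sqlglot.dataframe.sql.session import SparkSession

sqlglot.schema.add_table("employee", {"age": "int"}, dialect="spark")
spark = SparkSession()
# SparkSession.sql() now runs the optimizer up front, so the unaliased
# MAX(age) receives a generated alias instead of raising
# "No expression was parsed from ''".
df = spark.sql("SELECT MAX(age) FROM employee")
print(df.sql()[0])
```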
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dataframe/unit/test_session.py::TestDataframeSession::test_sql_with_aggs", "tests/dataframe/unit/test_session.py::TestDataframeSession::test_sql_create", "tests/dataframe/unit/test_session.py::TestDataframeSession::test_sql_insert" ]
[ "tests/dataframe/unit/test_session.py::TestDataframeSession::test_sql_select_only", "tests/dataframe/unit/test_session.py::TestDataframeSession::test_cdf_no_schema", "tests/dataframe/unit/test_session.py::TestDataframeSession::test_cdf_one_row", "tests/dataframe/unit/test_session.py::TestDataframeSession::test_cdf_row_mixed_primitives", "tests/dataframe/unit/test_session.py::TestDataframeSession::test_cdf_dict_rows", "tests/dataframe/unit/test_session.py::TestDataframeSession::test_session_create_builder_patterns", "tests/dataframe/unit/test_session.py::TestDataframeSession::test_typed_schema_nested", "tests/dataframe/unit/test_session.py::TestDataframeSession::test_typed_schema_basic", "tests/dataframe/unit/test_session.py::TestDataframeSession::test_cdf_multiple_rows", "tests/dataframe/unit/test_session.py::TestDataframeSession::test_cdf_str_schema" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-03-06T23:50:52Z"
mit
tobymao__sqlglot-3111
diff --git a/README.md b/README.md index 0bfedc66..e8eae258 100644 --- a/README.md +++ b/README.md @@ -150,7 +150,7 @@ sql = """ */ SELECT tbl.cola /* comment 1 */ + tbl.colb /* comment 2 */, - CAST(x AS INT), # comment 3 + CAST(x AS SIGNED), # comment 3 y -- comment 4 FROM bar /* comment 5 */, @@ -367,7 +367,9 @@ diff(parse_one("SELECT a + b, c, d"), parse_one("SELECT c, a - b, d")) this=Identifier(this=a, quoted=False)), expression=Column( this=Identifier(this=b, quoted=False)))), - Keep(source=Identifier(this=d, quoted=False), target=Identifier(this=d, quoted=False)), + Keep( + source=Column(this=Identifier(this=a, quoted=False)), + target=Column(this=Identifier(this=a, quoted=False))), ... ] ``` @@ -492,6 +494,7 @@ make docs-serve ``` make style # Only linter checks make unit # Only unit tests +make test # Unit and integration tests make check # Full test suite & linter checks ``` diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py index 4a7bd04b..4ba3ac34 100644 --- a/sqlglot/dialects/duckdb.py +++ b/sqlglot/dialects/duckdb.py @@ -199,6 +199,7 @@ class DuckDB(Dialect): "LOGICAL": TokenType.BOOLEAN, "ONLY": TokenType.ONLY, "PIVOT_WIDER": TokenType.PIVOT, + "POSITIONAL": TokenType.POSITIONAL, "SIGNED": TokenType.INT, "STRING": TokenType.VARCHAR, "UBIGINT": TokenType.UBIGINT, diff --git a/sqlglot/parser.py b/sqlglot/parser.py index fb808fda..5f000540 100644 --- a/sqlglot/parser.py +++ b/sqlglot/parser.py @@ -507,8 +507,9 @@ class Parser(metaclass=_Parser): } JOIN_METHODS = { - TokenType.NATURAL, TokenType.ASOF, + TokenType.NATURAL, + TokenType.POSITIONAL, } JOIN_SIDES = { diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py index 8676eee4..201a3c04 100644 --- a/sqlglot/tokens.py +++ b/sqlglot/tokens.py @@ -317,6 +317,7 @@ class TokenType(AutoName): PERCENT = auto() PIVOT = auto() PLACEHOLDER = auto() + POSITIONAL = auto() PRAGMA = auto() PREWHERE = auto() PRIMARY_KEY = auto()
tobymao/sqlglot
88033dad05550cde05dcb86cce61a621c071382c
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py index b80d507c..35daff09 100644 --- a/tests/dialects/test_duckdb.py +++ b/tests/dialects/test_duckdb.py @@ -213,6 +213,7 @@ class TestDuckDB(Validator): parse_one("a // b", read="duckdb").assert_is(exp.IntDiv).sql(dialect="duckdb"), "a // b" ) + self.validate_identity("SELECT df1.*, df2.* FROM df1 POSITIONAL JOIN df2") self.validate_identity("MAKE_TIMESTAMP(1992, 9, 20, 13, 34, 27.123456)") self.validate_identity("MAKE_TIMESTAMP(1667810584123456)") self.validate_identity("SELECT EPOCH_MS(10) AS t")
DuckDB POSITIONAL JOIN syntax DuckDB supports a unique positional join type that matches up the rows of equal-length tables, [documented here](https://duckdb.org/docs/sql/query_syntax/from#positional-joins). Currently, sqlglot's duckdb dialect does not recognize this syntax: it parses `POSITIONAL` as a table alias, so the join degrades to an implicit cross join. Here's a minimal repro, using the example from the documentation: ```python from sqlglot import parse_one parsed = parse_one(""" SELECT df1.*, df2.* FROM df1 POSITIONAL JOIN df2; """, dialect="duckdb") print(f""" {parsed!r} -------------------- {parsed.sql(dialect="duckdb", pretty=True)} """) ``` This prints ``` Select( expressions=[ Column( this=Star(), table=Identifier(this=df1, quoted=False)), Column( this=Star(), table=Identifier(this=df2, quoted=False))], from=From( this=Table( this=Identifier(this=df1, quoted=False), alias=TableAlias( this=Identifier(this=POSITIONAL, quoted=False)))), joins=[ Join( this=Table( this=Identifier(this=df2, quoted=False)))]) -------------------- SELECT df1.*, df2.* FROM df1 AS POSITIONAL, df2 ```
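With the patch above (which tokenizes `POSITIONAL` as a join method), the example from the documentation should round-trip; a minimal sketch:

```python
from sqlglot import parse_one

sql = "SELECT df1.*, df2.* FROM df1 POSITIONAL JOIN df2"
# POSITIONAL is now parsed as a join method, so the statement round-trips
# instead of POSITIONAL being treated as a table alias, per the new test.
print(parse_one(sql, dialect="duckdb").sql(dialect="duckdb"))
```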
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb" ]
[ "tests/dialects/test_duckdb.py::TestDuckDB::test_array", "tests/dialects/test_duckdb.py::TestDuckDB::test_array_index", "tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or", "tests/dialects/test_duckdb.py::TestDuckDB::test_cast", "tests/dialects/test_duckdb.py::TestDuckDB::test_encode_decode", "tests/dialects/test_duckdb.py::TestDuckDB::test_isinf", "tests/dialects/test_duckdb.py::TestDuckDB::test_isnan", "tests/dialects/test_duckdb.py::TestDuckDB::test_parameter_token", "tests/dialects/test_duckdb.py::TestDuckDB::test_rename_table", "tests/dialects/test_duckdb.py::TestDuckDB::test_sample", "tests/dialects/test_duckdb.py::TestDuckDB::test_time", "tests/dialects/test_duckdb.py::TestDuckDB::test_timestamps_with_units" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-03-09T10:02:02Z"
mit
tobymao__sqlglot-3131
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py index 804775c3..18154779 100644 --- a/sqlglot/dialects/snowflake.py +++ b/sqlglot/dialects/snowflake.py @@ -20,8 +20,7 @@ from sqlglot.dialects.dialect import ( timestrtotime_sql, var_map_sql, ) -from sqlglot.expressions import Literal -from sqlglot.helper import flatten, is_int, seq_get +from sqlglot.helper import flatten, is_float, is_int, seq_get from sqlglot.tokens import TokenType if t.TYPE_CHECKING: # from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html -def _build_to_timestamp(args: t.List) -> t.Union[exp.StrToTime, exp.UnixToTime, exp.TimeStrToTime]: - if len(args) == 2: - first_arg, second_arg = args - if second_arg.is_string: - # case: <string_expr> [ , <format> ] - return build_formatted_time(exp.StrToTime, "snowflake")(args) - return exp.UnixToTime(this=first_arg, scale=second_arg) +def _build_timestamp(name: str, kind: exp.DataType.Type) -> t.Callable[[t.List], exp.Func]: + def _builder(args: t.List) -> exp.Func: + value = seq_get(args, 0) - from sqlglot.optimizer.simplify import simplify_literals + if isinstance(value, exp.Literal): + int_value = is_int(value.this) - # The first argument might be an expression like 40 * 365 * 86400, so we try to - # reduce it using `simplify_literals` first and then check if it's a Literal. - first_arg = seq_get(args, 0) - if not isinstance(simplify_literals(first_arg, root=True), Literal): - # case: <variant_expr> or other expressions such as columns - return exp.TimeStrToTime.from_arg_list(args) + # Converts calls like `TO_TIME('01:02:03')` into casts + if len(args) == 1 and value.is_string and not int_value: + return exp.cast(value, kind) - if first_arg.is_string: - if is_int(first_arg.this): - # case: <integer> - return exp.UnixToTime.from_arg_list(args) + # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special + # cases so we can transpile them, since they're relatively common + if kind == exp.DataType.Type.TIMESTAMP: + if int_value: + return exp.UnixToTime(this=value, scale=seq_get(args, 1)) + if not is_float(value.this): + return build_formatted_time(exp.StrToTime, "snowflake")(args) - # case: <date_expr> - return build_formatted_time(exp.StrToTime, "snowflake", default=True)(args) + return exp.Anonymous(this=name, expressions=args) - # case: <numeric_expr> - return exp.UnixToTime.from_arg_list(args) + return _builder def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]: @@ -364,7 +358,13 @@ class Snowflake(Dialect): precision=seq_get(args, 2), scale=seq_get(args, 3), ), - "TO_TIMESTAMP": _build_to_timestamp, + "TO_TIME": _build_timestamp("TO_TIME", exp.DataType.Type.TIME), + "TO_TIMESTAMP": _build_timestamp("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP), + "TO_TIMESTAMP_LTZ": _build_timestamp( + "TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ + ), + "TO_TIMESTAMP_NTZ": _build_timestamp("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP), + "TO_TIMESTAMP_TZ": _build_timestamp("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ), "TO_VARCHAR": exp.ToChar.from_arg_list, "ZEROIFNULL": _build_if_from_zeroifnull, } diff --git a/sqlglot/executor/env.py b/sqlglot/executor/env.py index 0d0d4813..c51049bb 100644 --- a/sqlglot/executor/env.py +++ b/sqlglot/executor/env.py @@ -106,6 +106,13 @@ def cast(this, to): return this if isinstance(this, str): return datetime.date.fromisoformat(this) + if to == exp.DataType.Type.TIME: + if isinstance(this, datetime.datetime): + return this.time() + if isinstance(this, datetime.time): + return this + if isinstance(this, str): + return datetime.time.fromisoformat(this) if to in (exp.DataType.Type.DATETIME, exp.DataType.Type.TIMESTAMP): if isinstance(this, datetime.datetime): return this diff --git a/sqlglot/helper.py b/sqlglot/helper.py index 0d4547fa..bcc68c3c 100644 --- a/sqlglot/helper.py +++ b/sqlglot/helper.py @@ -317,8 +317,16 @@ def find_new_name(taken: t.Collection[str], base: str) -> str: def is_int(text: str) -> bool: + return is_type(text, int) + + +def is_float(text: str) -> bool: + return is_type(text, float) + + +def is_type(text: str, target_type: t.Type) -> bool: try: - int(text) + target_type(text) return True except ValueError: return False
tobymao/sqlglot
c01ff44b036526807624ba2d1f4b247081e8c56f
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py index e4cec3a9..9d5a93be 100644 --- a/tests/dialects/test_snowflake.py +++ b/tests/dialects/test_snowflake.py @@ -40,6 +40,12 @@ WHERE )""", ) + self.validate_identity("SELECT TO_TIMESTAMP(123.4)").selects[0].assert_is(exp.Anonymous) + self.validate_identity("SELECT TO_TIME(x) FROM t") + self.validate_identity("SELECT TO_TIMESTAMP(x) FROM t") + self.validate_identity("SELECT TO_TIMESTAMP_NTZ(x) FROM t") + self.validate_identity("SELECT TO_TIMESTAMP_LTZ(x) FROM t") + self.validate_identity("SELECT TO_TIMESTAMP_TZ(x) FROM t") self.validate_identity("TO_DECIMAL(expr, fmt, precision, scale)") self.validate_identity("ALTER TABLE authors ADD CONSTRAINT c1 UNIQUE (id, email)") self.validate_identity("RM @parquet_stage", check_command_warning=True) @@ -198,10 +204,6 @@ WHERE "SELECT {fn CEILING(5.3)}", "SELECT CEIL(5.3)", ) - self.validate_identity( - "SELECT TO_TIMESTAMP(x) FROM t", - "SELECT CAST(x AS TIMESTAMPNTZ) FROM t", - ) self.validate_identity( "CAST(x AS BYTEINT)", "CAST(x AS INT)", @@ -632,9 +634,16 @@ WHERE self.validate_all( "SELECT TO_TIMESTAMP('2013-04-05 01:02:03')", write={ - "bigquery": "SELECT PARSE_TIMESTAMP('%Y-%m-%d %H:%M:%S', '2013-04-05 01:02:03')", - "snowflake": "SELECT TO_TIMESTAMP('2013-04-05 01:02:03', 'yyyy-mm-DD hh24:mi:ss')", - "spark": "SELECT TO_TIMESTAMP('2013-04-05 01:02:03', 'yyyy-MM-dd HH:mm:ss')", + "bigquery": "SELECT CAST('2013-04-05 01:02:03' AS DATETIME)", + "snowflake": "SELECT CAST('2013-04-05 01:02:03' AS TIMESTAMPNTZ)", + "spark": "SELECT CAST('2013-04-05 01:02:03' AS TIMESTAMP)", + }, + ) + self.validate_all( + "SELECT TO_TIME('12:05:00')", + write={ + "bigquery": "SELECT CAST('12:05:00' AS TIME)", + "snowflake": "SELECT CAST('12:05:00' AS TIME)", }, ) self.validate_all( diff --git a/tests/test_executor.py b/tests/test_executor.py index 4b81359c..1eaca14f 100644 --- a/tests/test_executor.py +++ b/tests/test_executor.py @@ -1,7 +1,7 @@ import os import datetime import unittest -from datetime import date +from datetime import date, time from multiprocessing import Pool import duckdb @@ -640,6 +640,7 @@ class TestExecutor(unittest.TestCase): ("CAST(1 AS TEXT)", "1"), ("CAST('1' AS LONG)", 1), ("CAST('1.1' AS FLOAT)", 1.1), + ("CAST('12:05:01' AS TIME)", time(12, 5, 1)), ("COALESCE(NULL)", None), ("COALESCE(NULL, NULL)", None), ("COALESCE(NULL, 'b')", "b"),
Support for Snowflake TO_TIME, TIME **Is your feature request related to a problem? Please describe.** The executor is not able to run queries containing `TO_TIME` or `TIME` calls when the reader is set to `sqlglot.dialects.Snowflake`. Example: ```python def test_time_function(): query = "SELECT TO_TIME('13:30:00');" assert executor.execute(query, read=Snowflake) ``` raises: ``` > raise ExecuteError(f"Step '{node.id}' failed: {e}") from e E sqlglot.errors.ExecuteError: Step 'Scan: (4541617904)' failed: name 'TO_TIME' is not defined ``` **Describe the solution you'd like** I would like support for this function. **Describe alternatives you've considered** I would gladly take on authoring a PR (including tests) if documentation were provided on how to implement this feature. Even if those steps were only provided in this GH issue, I could add them to the README. **Additional context** N/A
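For context on how the accompanying patch addresses this: single-argument string calls such as `TO_TIME('13:30:00')` are lowered into casts, and the executor's `cast` helper learns the `TIME` type. A minimal sketch of the resulting behavior (the expected values come from the tests in this record; treat the exact reprs as illustrative):

```python
from sqlglot import parse_one
from sqlglot.executor import execute

# Per the patch, Snowflake's TO_TIME(<string literal>) now parses into a CAST ...
ast = parse_one("SELECT TO_TIME('13:30:00')", read="snowflake")
print(ast.sql())  # SELECT CAST('13:30:00' AS TIME)

# ... which the executor can evaluate, since cast() now builds datetime.time values.
result = execute("SELECT CAST('13:30:00' AS TIME)")
print(result.rows)  # [(datetime.time(13, 30),)]
```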
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake", "tests/test_executor.py::TestExecutor::test_scalar_functions" ]
[ "tests/dialects/test_snowflake.py::TestSnowflake::test_ddl", "tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table", "tests/dialects/test_snowflake.py::TestSnowflake::test_flatten", "tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data", "tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize", "tests/dialects/test_snowflake.py::TestSnowflake::test_minus", "tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment", "tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any", "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace", "tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr", "tests/dialects/test_snowflake.py::TestSnowflake::test_sample", "tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types", "tests/dialects/test_snowflake.py::TestSnowflake::test_show_columns", "tests/dialects/test_snowflake.py::TestSnowflake::test_show_imported_keys", "tests/dialects/test_snowflake.py::TestSnowflake::test_show_objects", "tests/dialects/test_snowflake.py::TestSnowflake::test_show_primary_keys", "tests/dialects/test_snowflake.py::TestSnowflake::test_show_schemas", "tests/dialects/test_snowflake.py::TestSnowflake::test_show_sequences", "tests/dialects/test_snowflake.py::TestSnowflake::test_show_tables", "tests/dialects/test_snowflake.py::TestSnowflake::test_show_unique_keys", "tests/dialects/test_snowflake.py::TestSnowflake::test_show_users", "tests/dialects/test_snowflake.py::TestSnowflake::test_show_views", "tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files", "tests/dialects/test_snowflake.py::TestSnowflake::test_storage_integration", "tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures", "tests/dialects/test_snowflake.py::TestSnowflake::test_swap", "tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal", "tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps", "tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast", "tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions", "tests/dialects/test_snowflake.py::TestSnowflake::test_values", "tests/test_executor.py::TestExecutor::test_aggregate_without_group_by", "tests/test_executor.py::TestExecutor::test_case_sensitivity", "tests/test_executor.py::TestExecutor::test_correlated_count", "tests/test_executor.py::TestExecutor::test_execute_callable", "tests/test_executor.py::TestExecutor::test_execute_catalog_db_table", "tests/test_executor.py::TestExecutor::test_execute_subqueries", "tests/test_executor.py::TestExecutor::test_execute_tables", "tests/test_executor.py::TestExecutor::test_execute_tpcds", "tests/test_executor.py::TestExecutor::test_execute_tpch", "tests/test_executor.py::TestExecutor::test_group_by", "tests/test_executor.py::TestExecutor::test_nested_table_reference", "tests/test_executor.py::TestExecutor::test_nested_values", "tests/test_executor.py::TestExecutor::test_optimized_tpch", "tests/test_executor.py::TestExecutor::test_py_dialect", "tests/test_executor.py::TestExecutor::test_set_operations", "tests/test_executor.py::TestExecutor::test_static_queries", "tests/test_executor.py::TestExecutor::test_table_depth_mismatch", "tests/test_executor.py::TestExecutor::test_tables" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-03-12T20:41:15Z"
mit
tobymao__sqlglot-3162
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index b19dc852..c0c39030 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -1931,6 +1931,7 @@ class Insert(DDL, DML): arg_types = { "hint": False, "with": False, + "is_function": False, "this": True, "expression": False, "conflict": False, diff --git a/sqlglot/generator.py b/sqlglot/generator.py index a61b4b75..077e5ff0 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -1512,7 +1512,9 @@ class Generator(metaclass=_Generator): alternative = expression.args.get("alternative") alternative = f" OR {alternative}" if alternative else "" ignore = " IGNORE" if expression.args.get("ignore") else "" - + is_function = expression.args.get("is_function") + if is_function: + this = f"{this} FUNCTION" this = f"{this} {self.sql(expression, 'this')}" exists = " IF EXISTS" if expression.args.get("exists") else "" diff --git a/sqlglot/parser.py b/sqlglot/parser.py index 60364141..d934b4c6 100644 --- a/sqlglot/parser.py +++ b/sqlglot/parser.py @@ -2185,7 +2185,9 @@ class Parser(metaclass=_Parser): self._match(TokenType.INTO) comments += ensure_list(self._prev_comments) self._match(TokenType.TABLE) - this = self._parse_table(schema=True) + is_function = self._match(TokenType.FUNCTION) + + this = self._parse_table(schema=True) if not is_function else self._parse_function() returning = self._parse_returning() @@ -2193,6 +2195,7 @@ class Parser(metaclass=_Parser): exp.Insert, comments=comments, hint=hint, + is_function=is_function, this=this, by_name=self._match_text_seq("BY", "NAME"), exists=self._parse_exists(),
tobymao/sqlglot
706fac382fbde6c1c6af8acd277291a3f18f94ee
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py index edf3da12..8a40899a 100644 --- a/tests/dialects/test_clickhouse.py +++ b/tests/dialects/test_clickhouse.py @@ -390,6 +390,17 @@ class TestClickhouse(Validator): ) self.validate_identity("SYSTEM STOP MERGES foo.bar", check_command_warning=True) + self.validate_identity( + "INSERT INTO FUNCTION s3('url', 'CSV', 'name String, value UInt32', 'gzip') SELECT name, value FROM existing_table" + ) + self.validate_identity( + "INSERT INTO FUNCTION remote('localhost', default.simple_table) VALUES (100, 'inserted via remote()')" + ) + self.validate_identity( + """INSERT INTO TABLE FUNCTION hdfs('hdfs://hdfs1:9000/test', 'TSV', 'name String, column2 UInt32, column3 UInt32') VALUES ('test', 1, 2)""", + """INSERT INTO FUNCTION hdfs('hdfs://hdfs1:9000/test', 'TSV', 'name String, column2 UInt32, column3 UInt32') VALUES ('test', 1, 2)""", + ) + def test_cte(self): self.validate_identity("WITH 'x' AS foo SELECT foo") self.validate_identity("WITH ['c'] AS field_names SELECT field_names")
Clickhouse INSERT INTO FUNCTION s3 raises ParseError **Fully reproducible code snippet** ``` import unittest from sqlglot import parse_one from sqlglot.dialects import ClickHouse class TestClickhouseInsertIntoS3Select(unittest.TestCase): def test_parse_one_insert_into_s3_select(self): sql = """ INSERT INTO FUNCTION s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip') SELECT name, value FROM existing_table; """ ast = parse_one(sql=sql, dialect=ClickHouse) self.assertIsNotNone(ast) if __name__ == '__main__': unittest.main() ``` **Exception** ``` sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 2, Col: 31. INSERT INTO FUNCTION s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/test-data.csv.gz', 'CSV', ' ``` **Official Documentation** The SQL statement example is taken from https://clickhouse.com/docs/en/sql-reference/table-functions/s3, under "Insert data into file test-data.csv.gz from existing table". Thanks in advance.
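The patch resolves this by letting the parser accept a function call as the insert target and recording that on the `Insert` node. A small sketch of the fixed behavior, built around the identity test in this record (the `is_function` arg name comes from the diff above):

```python
from sqlglot import exp, parse_one

sql = (
    "INSERT INTO FUNCTION s3('url', 'CSV', 'name String, value UInt32', 'gzip') "
    "SELECT name, value FROM existing_table"
)
ast = parse_one(sql, read="clickhouse")
assert isinstance(ast, exp.Insert) and ast.args["is_function"]
assert ast.sql(dialect="clickhouse") == sql  # round-trips per the new identity test
```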
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse" ]
[ "tests/dialects/test_clickhouse.py::TestClickhouse::test_cte", "tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl", "tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization", "tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types", "tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-03-18T14:17:33Z"
mit
tobymao__sqlglot-3166
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index 7a8fef4e..21269329 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -621,7 +621,7 @@ class Expression(metaclass=_Expression): return expression key = self.arg_key - value = parent.args[key] + value = parent.args.get(key) if isinstance(value, list): index = self.index @@ -639,7 +639,7 @@ class Expression(metaclass=_Expression): else: value[index] = expression parent._set_parent(key, expression, index=index) - else: + elif value is not None: if expression is None: parent.args.pop(key) else:
tobymao/sqlglot
cdbe39e7ec36b30e211942b1f62ae86946c7b520
diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py index 71c65fae..984ec22b 100644 --- a/tests/test_optimizer.py +++ b/tests/test_optimizer.py @@ -342,6 +342,9 @@ class TestOptimizer(unittest.TestCase): def test_simplify(self): self.check_file("simplify", simplify, set_dialect=True) + expression = parse_one("SELECT a, c, b FROM table1 WHERE 1 = 1") + self.assertEqual(simplify(simplify(expression.find(exp.Where))).sql(), "WHERE TRUE") + expression = parse_one("TRUE AND TRUE AND TRUE") self.assertEqual(exp.true(), optimizer.simplify.simplify(expression)) self.assertEqual(exp.true(), optimizer.simplify.simplify(expression.this))
running simplify multiple times fails **Fully reproducible code snippet** ``` >>> from sqlglot.optimizer.scope import exp >>> from sqlglot.optimizer.simplify import simplify >>> from sqlglot import parse_one >>> ast=parse_one("SELECT a,c,b from table1 where 1 = 1;") >>> w = ast.find(exp.Where) >>> w2 = simplify(w) >>> w2 = simplify(w) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "lib/python3.9/site-packages/sqlglot/optimizer/simplify.py", line 109, in simplify expression = while_changing(expression, _simplify) File "lib/python3.9/site-packages/sqlglot/helper.py", line 211, in while_changing expression = func(expression) File "lib/python3.9/site-packages/sqlglot/optimizer/simplify.py", line 106, in _simplify expression.replace(node) File "lib/python3.9/site-packages/sqlglot/expressions.py", line 624, in replace value = parent.args[key] KeyError: 'where' >>> ``` In my code I run simplify multiple times as I reduce some elements of a query, in this case elements in the WHERE clause. As you can see in the sample code, the first call works fine, but once the WHERE clause has been removed the second call fails. This used to work before, so I guess a check that the key exists has been removed.
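The patch makes `Expression.replace` tolerant of a parent that has already dropped the child's arg key: it reads `parent.args.get(key)` instead of `parent.args[key]` and skips the update when the value is gone. A minimal check mirroring the regression test added in this record:

```python
from sqlglot import exp, parse_one
from sqlglot.optimizer.simplify import simplify

where = parse_one("SELECT a, c, b FROM table1 WHERE 1 = 1").find(exp.Where)
# The second pass previously raised KeyError: 'where'; now it succeeds.
assert simplify(simplify(where)).sql() == "WHERE TRUE"
```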
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_optimizer.py::TestOptimizer::test_simplify" ]
[ "tests/test_optimizer.py::TestOptimizer::test_aggfunc_annotation", "tests/test_optimizer.py::TestOptimizer::test_binary_annotation", "tests/test_optimizer.py::TestOptimizer::test_boolean_type_annotation", "tests/test_optimizer.py::TestOptimizer::test_bracket_annotation", "tests/test_optimizer.py::TestOptimizer::test_cache_annotation", "tests/test_optimizer.py::TestOptimizer::test_canonicalize", "tests/test_optimizer.py::TestOptimizer::test_cast_type_annotation", "tests/test_optimizer.py::TestOptimizer::test_concat_annotation", "tests/test_optimizer.py::TestOptimizer::test_cte_column_annotation", "tests/test_optimizer.py::TestOptimizer::test_derived_tables_column_annotation", "tests/test_optimizer.py::TestOptimizer::test_eliminate_ctes", "tests/test_optimizer.py::TestOptimizer::test_eliminate_joins", "tests/test_optimizer.py::TestOptimizer::test_eliminate_subqueries", "tests/test_optimizer.py::TestOptimizer::test_expand_alias_refs", "tests/test_optimizer.py::TestOptimizer::test_file_schema", "tests/test_optimizer.py::TestOptimizer::test_function_annotation", "tests/test_optimizer.py::TestOptimizer::test_interval_math_annotation", "tests/test_optimizer.py::TestOptimizer::test_isolate_table_selects", "tests/test_optimizer.py::TestOptimizer::test_lateral_annotation", "tests/test_optimizer.py::TestOptimizer::test_literal_type_annotation", "tests/test_optimizer.py::TestOptimizer::test_nested_type_annotation", "tests/test_optimizer.py::TestOptimizer::test_no_pseudocolumn_expansion", "tests/test_optimizer.py::TestOptimizer::test_normalize", "tests/test_optimizer.py::TestOptimizer::test_normalize_identifiers", "tests/test_optimizer.py::TestOptimizer::test_null_annotation", "tests/test_optimizer.py::TestOptimizer::test_nullable_annotation", "tests/test_optimizer.py::TestOptimizer::test_optimize", "tests/test_optimizer.py::TestOptimizer::test_optimize_joins", "tests/test_optimizer.py::TestOptimizer::test_predicate_annotation", "tests/test_optimizer.py::TestOptimizer::test_pushdown_cte_alias_columns", "tests/test_optimizer.py::TestOptimizer::test_pushdown_predicates", "tests/test_optimizer.py::TestOptimizer::test_pushdown_projection", "tests/test_optimizer.py::TestOptimizer::test_qualify_columns", "tests/test_optimizer.py::TestOptimizer::test_qualify_columns__invalid", "tests/test_optimizer.py::TestOptimizer::test_qualify_columns__with_invisible", "tests/test_optimizer.py::TestOptimizer::test_qualify_tables", "tests/test_optimizer.py::TestOptimizer::test_quote_identifiers", "tests/test_optimizer.py::TestOptimizer::test_quotes", "tests/test_optimizer.py::TestOptimizer::test_recursive_cte", "tests/test_optimizer.py::TestOptimizer::test_root_subquery_annotation", "tests/test_optimizer.py::TestOptimizer::test_schema_with_spaces", "tests/test_optimizer.py::TestOptimizer::test_scope", "tests/test_optimizer.py::TestOptimizer::test_scope_warning", "tests/test_optimizer.py::TestOptimizer::test_semistructured", "tests/test_optimizer.py::TestOptimizer::test_struct_type_annotation", "tests/test_optimizer.py::TestOptimizer::test_tpcds", "tests/test_optimizer.py::TestOptimizer::test_tpch", "tests/test_optimizer.py::TestOptimizer::test_type_annotation_cache", "tests/test_optimizer.py::TestOptimizer::test_typeddiv_annotation", "tests/test_optimizer.py::TestOptimizer::test_unknown_annotation", "tests/test_optimizer.py::TestOptimizer::test_unnest_annotation", "tests/test_optimizer.py::TestOptimizer::test_unnest_subqueries", "tests/test_optimizer.py::TestOptimizer::test_user_defined_type_annotation" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2024-03-19T00:00:29Z"
mit
tobymao__sqlglot-3171
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index 7ef75ac3..6f2d7603 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -1404,7 +1404,12 @@ class WithinGroup(Expression): # clickhouse supports scalar ctes # https://clickhouse.com/docs/en/sql-reference/statements/select/with class CTE(DerivedTable): - arg_types = {"this": True, "alias": True, "scalar": False} + arg_types = { + "this": True, + "alias": True, + "scalar": False, + "materialized": False, + } class TableAlias(Expression): diff --git a/sqlglot/generator.py b/sqlglot/generator.py index a6fa9a2a..804df019 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -1049,7 +1049,14 @@ class Generator(metaclass=_Generator): def cte_sql(self, expression: exp.CTE) -> str: alias = self.sql(expression, "alias") - return f"{alias} AS {self.wrap(expression)}" + + materialized = expression.args.get("materialized") + if materialized is False: + materialized = "NOT MATERIALIZED " + elif materialized: + materialized = "MATERIALIZED " + + return f"{alias} AS {materialized or ''}{self.wrap(expression)}" def tablealias_sql(self, expression: exp.TableAlias) -> str: alias = self.sql(expression, "this") diff --git a/sqlglot/parser.py b/sqlglot/parser.py index 0c7e9957..208f3364 100644 --- a/sqlglot/parser.py +++ b/sqlglot/parser.py @@ -2546,8 +2546,19 @@ class Parser(metaclass=_Parser): self.raise_error("Expected CTE to have alias") self._match(TokenType.ALIAS) + + if self._match_text_seq("NOT", "MATERIALIZED"): + materialized = False + elif self._match_text_seq("MATERIALIZED"): + materialized = True + else: + materialized = None + return self.expression( - exp.CTE, this=self._parse_wrapped(self._parse_statement), alias=alias + exp.CTE, + this=self._parse_wrapped(self._parse_statement), + alias=alias, + materialized=materialized, ) def _parse_table_alias(
tobymao/sqlglot
d859fc0f6eeb0971dab5b22748d1e84425829444
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py index 77c42731..e2a153f5 100644 --- a/tests/dialects/test_postgres.py +++ b/tests/dialects/test_postgres.py @@ -40,13 +40,6 @@ class TestPostgres(Validator): self.validate_identity("CAST(x AS DATEMULTIRANGE)") self.validate_identity("SELECT ARRAY[1, 2, 3] @> ARRAY[1, 2]") self.validate_identity("SELECT ARRAY[1, 2, 3] <@ ARRAY[1, 2]") - self.validate_all( - "SELECT ARRAY[1, 2, 3] && ARRAY[1, 2]", - write={ - "": "SELECT ARRAY_OVERLAPS(ARRAY(1, 2, 3), ARRAY(1, 2))", - "postgres": "SELECT ARRAY[1, 2, 3] && ARRAY[1, 2]", - }, - ) self.validate_identity("x$") self.validate_identity("SELECT ARRAY[1, 2, 3]") self.validate_identity("SELECT ARRAY(SELECT 1)") @@ -70,6 +63,9 @@ class TestPostgres(Validator): self.validate_identity("EXEC AS myfunc @id = 123", check_command_warning=True) self.validate_identity("SELECT CURRENT_USER") self.validate_identity("SELECT * FROM ONLY t1") + self.validate_identity( + "WITH t1 AS MATERIALIZED (SELECT 1), t2 AS NOT MATERIALIZED (SELECT 2) SELECT * FROM t1, t2" + ) self.validate_identity( """LAST_VALUE("col1") OVER (ORDER BY "col2" RANGE BETWEEN INTERVAL '1 DAY' PRECEDING AND '1 month' FOLLOWING)""" ) @@ -310,6 +306,13 @@ class TestPostgres(Validator): ) self.validate_identity("SELECT * FROM t1*", "SELECT * FROM t1") + self.validate_all( + "SELECT ARRAY[1, 2, 3] && ARRAY[1, 2]", + write={ + "": "SELECT ARRAY_OVERLAPS(ARRAY(1, 2, 3), ARRAY(1, 2))", + "postgres": "SELECT ARRAY[1, 2, 3] && ARRAY[1, 2]", + }, + ) self.validate_all( "SELECT JSON_EXTRACT_PATH_TEXT(x, k1, k2, k3) FROM t", read={
`WITH foo AS MATERIALIZED (` fails in Postgres This is a valid Postgres query: ```sql with data as materialized ( select a.n/100 as x, 10*a.n/30 as y from generate_series(1, 1000) as a(n) ) select * from data ``` But it fails to parse because of `materialized`: ```python from sqlglot import parse_one # using 23.0.1 p = parse_one( """ with data as materialized ( select a.n/100 as x, 10*a.n/30 as y from generate_series(1, 1000) as a(n) ) select * from data """, dialect="postgres", ) ``` ```bash $ python test.py Traceback (most recent call last): File "/Users/beto/Projects/github/superset/test.py", line 3, in <module> p = parse_one( File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/__init__.py", line 124, in parse_one result = dialect.parse(sql, **opts) File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/dialects/dialect.py", line 490, in parse return self.parser(**opts).parse(self.tokenize(sql), sql) File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 1153, in parse return self._parse( File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 1219, in _parse expressions.append(parse_method(self)) File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 1427, in _parse_statement expression = self._parse_set_operations(expression) if expression else self._parse_select() File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 2426, in _parse_select cte = self._parse_with() File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 2532, in _parse_with expressions.append(self._parse_cte()) File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 2550, in _parse_cte exp.CTE, this=self._parse_wrapped(self._parse_statement), alias=alias File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 5548, in _parse_wrapped self.raise_error("Expecting (") File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 1263, in raise_error raise error sqlglot.errors.ParseError: Expecting (. Line 2, Col: 25. with data as materialized ( select a.n/100 as x, 10*a.n/30 as y from generate_series(1, 1000) as a(n) ) select * from data ```
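With the fix, the `[NOT] MATERIALIZED` modifier is captured on the `CTE` node and regenerated verbatim. The round-trip below is the identity test added in this record:

```python
from sqlglot import parse_one

sql = (
    "WITH t1 AS MATERIALIZED (SELECT 1), t2 AS NOT MATERIALIZED (SELECT 2) "
    "SELECT * FROM t1, t2"
)
assert parse_one(sql, dialect="postgres").sql(dialect="postgres") == sql
```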
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_postgres.py::TestPostgres::test_postgres" ]
[ "tests/dialects/test_postgres.py::TestPostgres::test_array_offset", "tests/dialects/test_postgres.py::TestPostgres::test_bool_or", "tests/dialects/test_postgres.py::TestPostgres::test_ddl", "tests/dialects/test_postgres.py::TestPostgres::test_operator", "tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary", "tests/dialects/test_postgres.py::TestPostgres::test_string_concat", "tests/dialects/test_postgres.py::TestPostgres::test_unnest", "tests/dialects/test_postgres.py::TestPostgres::test_unnest_json_array", "tests/dialects/test_postgres.py::TestPostgres::test_variance" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2024-03-19T18:55:41Z"
mit
tobymao__sqlglot-3203
diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py index a41b6ea8..70066677 100644 --- a/sqlglot/dialects/redshift.py +++ b/sqlglot/dialects/redshift.py @@ -171,6 +171,8 @@ class Redshift(Postgres): ), exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})", + exp.StartsWith: lambda self, + e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'", exp.TableSample: no_tablesample_sql, exp.TsOrDsAdd: date_delta_sql("DATEADD"), exp.TsOrDsDiff: date_delta_sql("DATEDIFF"), diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index da206544..0cbaf20e 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -2394,13 +2394,7 @@ class OutputModelProperty(Property): class IsolatedLoadingProperty(Property): - arg_types = { - "no": False, - "concurrent": False, - "for_all": False, - "for_insert": False, - "for_none": False, - } + arg_types = {"no": False, "concurrent": False, "target": False} class JournalProperty(Property): @@ -2608,6 +2602,11 @@ class UnloggedProperty(Property): arg_types = {} +# https://learn.microsoft.com/en-us/sql/t-sql/statements/create-view-transact-sql?view=sql-server-ver16 +class ViewAttributeProperty(Property): + arg_types = {"this": True} + + class VolatileProperty(Property): arg_types = {"this": False} diff --git a/sqlglot/generator.py b/sqlglot/generator.py index 804df019..721efb61 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -46,9 +46,11 @@ class Generator(metaclass=_Generator): 'safe': Only quote identifiers that are case insensitive. normalize: Whether to normalize identifiers to lowercase. Default: False. - pad: The pad size in a formatted string. + pad: The pad size in a formatted string. For example, this affects the indentation of + a projection in a query, relative to its nesting level. Default: 2. - indent: The indentation size in a formatted string. + indent: The indentation size in a formatted string. For example, this affects the + indentation of subqueries and filters under a `WHERE` clause. Default: 2. normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. 
@@ -141,6 +143,7 @@ class Generator(metaclass=_Generator): exp.UppercaseColumnConstraint: lambda *_: "UPPERCASE", exp.UnloggedProperty: lambda *_: "UNLOGGED", exp.VarMap: lambda self, e: self.func("MAP", e.args["keys"], e.args["values"]), + exp.ViewAttributeProperty: lambda self, e: f"WITH {self.sql(e, 'this')}", exp.VolatileProperty: lambda *_: "VOLATILE", exp.WithJournalTableProperty: lambda self, e: f"WITH JOURNAL TABLE={self.sql(e, 'this')}", exp.WithOperator: lambda self, e: f"{self.sql(e, 'this')} WITH {self.sql(e, 'op')}", @@ -451,6 +454,7 @@ class Generator(metaclass=_Generator): exp.TransformModelProperty: exp.Properties.Location.POST_SCHEMA, exp.MergeTreeTTL: exp.Properties.Location.POST_SCHEMA, exp.UnloggedProperty: exp.Properties.Location.POST_CREATE, + exp.ViewAttributeProperty: exp.Properties.Location.POST_SCHEMA, exp.VolatileProperty: exp.Properties.Location.POST_CREATE, exp.WithDataProperty: exp.Properties.Location.POST_EXPRESSION, exp.WithJournalTableProperty: exp.Properties.Location.POST_NAME, @@ -1442,15 +1446,9 @@ class Generator(metaclass=_Generator): no = " NO" if no else "" concurrent = expression.args.get("concurrent") concurrent = " CONCURRENT" if concurrent else "" - - for_ = "" - if expression.args.get("for_all"): - for_ = " FOR ALL" - elif expression.args.get("for_insert"): - for_ = " FOR INSERT" - elif expression.args.get("for_none"): - for_ = " FOR NONE" - return f"WITH{no}{concurrent} ISOLATED LOADING{for_}" + target = self.sql(expression, "target") + target = f" {target}" if target else "" + return f"WITH{no}{concurrent} ISOLATED LOADING{target}" def partitionboundspec_sql(self, expression: exp.PartitionBoundSpec) -> str: if isinstance(expression.this, list): @@ -3221,7 +3219,7 @@ class Generator(metaclass=_Generator): num_sqls = len(expressions) # These are calculated once in case we have the leading_comma / pretty option set, correspondingly - pad = " " * self.pad + pad = " " * len(sep) stripped_sep = sep.strip() result_sqls = [] diff --git a/sqlglot/parser.py b/sqlglot/parser.py index b33af74a..be0b1084 100644 --- a/sqlglot/parser.py +++ b/sqlglot/parser.py @@ -1026,6 +1026,8 @@ class Parser(metaclass=_Parser): ), } + ISOLATED_LOADING_OPTIONS: OPTIONS_TYPE = {"FOR": ("ALL", "INSERT", "NONE")} + USABLES: OPTIONS_TYPE = dict.fromkeys(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA"), tuple()) CAST_ACTIONS: OPTIONS_TYPE = dict.fromkeys(("RENAME", "ADD"), ("FIELDS",)) @@ -1041,6 +1043,8 @@ class Parser(metaclass=_Parser): TABLE_INDEX_HINT_TOKENS = {TokenType.FORCE, TokenType.IGNORE, TokenType.USE} + VIEW_ATTRIBUTES = {"ENCRYPTION", "SCHEMABINDING", "VIEW_METADATA"} + WINDOW_ALIAS_TOKENS = ID_VAR_TOKENS - {TokenType.ROWS} WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER} WINDOW_SIDES = {"FOLLOWING", "PRECEDING"} @@ -1798,15 +1802,16 @@ class Parser(metaclass=_Parser): return prop - def _parse_with_property( - self, - ) -> t.Optional[exp.Expression] | t.List[exp.Expression]: + def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]: if self._match(TokenType.L_PAREN, advance=False): return self._parse_wrapped_properties() if self._match_text_seq("JOURNAL"): return self._parse_withjournaltable() + if self._match_texts(self.VIEW_ATTRIBUTES): + return self.expression(exp.ViewAttributeProperty, this=self._prev.text.upper()) + if self._match_text_seq("DATA"): return self._parse_withdata(no=False) elif self._match_text_seq("NO", "DATA"): @@ -1954,20 +1959,18 @@ class Parser(metaclass=_Parser): autotemp=autotemp, ) - def 
_parse_withisolatedloading(self) -> exp.IsolatedLoadingProperty: + def _parse_withisolatedloading(self) -> t.Optional[exp.IsolatedLoadingProperty]: + index = self._index no = self._match_text_seq("NO") concurrent = self._match_text_seq("CONCURRENT") - self._match_text_seq("ISOLATED", "LOADING") - for_all = self._match_text_seq("FOR", "ALL") - for_insert = self._match_text_seq("FOR", "INSERT") - for_none = self._match_text_seq("FOR", "NONE") + + if not self._match_text_seq("ISOLATED", "LOADING"): + self._retreat(index) + return None + + target = self._parse_var_from_options(self.ISOLATED_LOADING_OPTIONS, raise_unmatched=False) return self.expression( - exp.IsolatedLoadingProperty, - no=no, - concurrent=concurrent, - for_all=for_all, - for_insert=for_insert, - for_none=for_none, + exp.IsolatedLoadingProperty, no=no, concurrent=concurrent, target=target ) def _parse_locking(self) -> exp.LockingProperty: diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py index 824ebe74..1ba8b2ad 100644 --- a/sqlglot/tokens.py +++ b/sqlglot/tokens.py @@ -565,8 +565,7 @@ class Tokenizer(metaclass=_Tokenizer): "~": TokenType.TILDA, "?": TokenType.PLACEHOLDER, "@": TokenType.PARAMETER, - # used for breaking a var like x'y' but nothing else - # the token type doesn't matter + # Used for breaking a var like x'y' but nothing else the token type doesn't matter "'": TokenType.QUOTE, "`": TokenType.IDENTIFIER, '"': TokenType.IDENTIFIER, @@ -892,7 +891,7 @@ class Tokenizer(metaclass=_Tokenizer): COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN} - # handle numeric literals like in hive (3L = BIGINT) + # Handle numeric literals like in hive (3L = BIGINT) NUMERIC_LITERALS: t.Dict[str, str] = {} COMMENTS = ["--", ("/*", "*/")] @@ -965,8 +964,7 @@ class Tokenizer(metaclass=_Tokenizer): while self.size and not self._end: current = self._current - # skip spaces inline rather than iteratively call advance() - # for performance reasons + # Skip spaces here rather than iteratively calling advance() for performance reasons while current < self.size: char = self.sql[current] @@ -975,12 +973,10 @@ class Tokenizer(metaclass=_Tokenizer): else: break - n = current - self._current - self._start = current - self._advance(n if n > 1 else 1) + offset = current - self._current if current > self._current else 1 - if self._char is None: - break + self._start = current + self._advance(offset) if not self._char.isspace(): if self._char.isdigit(): @@ -1008,12 +1004,9 @@ class Tokenizer(metaclass=_Tokenizer): def _advance(self, i: int = 1, alnum: bool = False) -> None: if self.WHITE_SPACE.get(self._char) is TokenType.BREAK: # Ensures we don't count an extra line if we get a \r\n line break sequence - if self._char == "\r" and self._peek == "\n": - i = 2 - self._start += 1 - - self._col = 1 - self._line += 1 + if not (self._char == "\r" and self._peek == "\n"): + self._col = 1 + self._line += 1 else: self._col += i diff --git a/sqlglotrs/src/tokenizer.rs b/sqlglotrs/src/tokenizer.rs index 2c90a650..881417e5 100644 --- a/sqlglotrs/src/tokenizer.rs +++ b/sqlglotrs/src/tokenizer.rs @@ -118,8 +118,27 @@ impl<'a> TokenizerState<'a> { fn scan(&mut self, until_peek_char: Option<char>) -> Result<(), TokenizerError> { while self.size > 0 && !self.is_end { - self.start = self.current; - self.advance(1)?; + let mut current = self.current; + + // Skip spaces here rather than iteratively calling advance() for performance reasons + while current < self.size { + let ch = self.char_at(current)?; + + if ch == ' ' || ch == '\t' { + current += 1; + } 
else { + break; + } + } + + let offset = if current > self.current { + current - self.current + } else { + 1 + }; + + self.start = current; + self.advance(offset as isize)?; if self.current_char == '\0' { break; @@ -153,16 +172,12 @@ impl<'a> TokenizerState<'a> { } fn advance(&mut self, i: isize) -> Result<(), TokenizerError> { - let mut i = i; if Some(&self.token_types.break_) == self.settings.white_space.get(&self.current_char) { // Ensures we don't count an extra line if we get a \r\n line break sequence. - if self.current_char == '\r' && self.peek_char == '\n' { - i = 2; - self.start += 1; + if ! (self.current_char == '\r' && self.peek_char == '\n') { + self.column = 1; + self.line += 1; } - - self.column = 1; - self.line += 1; } else { self.column = self.column.wrapping_add_signed(i); }
tobymao/sqlglot
3620b9974c28df7d4d189ebd5fdcb675f41a275d
diff --git a/tests/dialects/test_redshift.py b/tests/dialects/test_redshift.py index 896ee451..7affe31f 100644 --- a/tests/dialects/test_redshift.py +++ b/tests/dialects/test_redshift.py @@ -139,6 +139,15 @@ class TestRedshift(Validator): "presto": "LENGTH(x)", }, ) + self.validate_all( + "x LIKE 'abc' || '%'", + read={ + "duckdb": "STARTS_WITH(x, 'abc')", + }, + write={ + "redshift": "x LIKE 'abc' || '%'", + }, + ) self.validate_all( "SELECT SYSDATE", diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py index 4efd7b91..aefd8575 100644 --- a/tests/dialects/test_tsql.py +++ b/tests/dialects/test_tsql.py @@ -742,6 +742,9 @@ class TestTSQL(Validator): ) def test_ddl(self): + for view_attr in ("ENCRYPTION", "SCHEMABINDING", "VIEW_METADATA"): + self.validate_identity(f"CREATE VIEW a.b WITH {view_attr} AS SELECT * FROM x") + expression = parse_one("ALTER TABLE dbo.DocExe DROP CONSTRAINT FK_Column_B", dialect="tsql") self.assertIsInstance(expression, exp.AlterTable) self.assertIsInstance(expression.args["actions"][0], exp.Drop) diff --git a/tests/test_tokens.py b/tests/test_tokens.py index 970c1ac2..29ef5b61 100644 --- a/tests/test_tokens.py +++ b/tests/test_tokens.py @@ -85,6 +85,18 @@ x""" ], ) + for simple_query in ("SELECT 1\r\n", "\r\nSELECT 1"): + tokens = Tokenizer().tokenize(simple_query) + tokens = [(token.token_type, token.text) for token in tokens] + + self.assertEqual( + tokens, + [ + (TokenType.SELECT, "SELECT"), + (TokenType.NUMBER, "1"), + ], + ) + def test_command(self): tokens = Tokenizer().tokenize("SHOW;") self.assertEqual(tokens[0].token_type, TokenType.SHOW) diff --git a/tests/test_transpile.py b/tests/test_transpile.py index 0170e230..f6fd2f9a 100644 --- a/tests/test_transpile.py +++ b/tests/test_transpile.py @@ -66,6 +66,24 @@ class TestTranspile(unittest.TestCase): ) def test_leading_comma(self): + self.validate( + "SELECT a, b, c FROM (SELECT a, b, c FROM t)", + "SELECT\n" + " a\n" + " , b\n" + " , c\n" + "FROM (\n" + " SELECT\n" + " a\n" + " , b\n" + " , c\n" + " FROM t\n" + ")", + leading_comma=True, + pretty=True, + pad=4, + indent=4, + ) self.validate( "SELECT FOO, BAR, BAZ", "SELECT\n FOO\n , BAR\n , BAZ",
Unable to parse view definition with SCHEMABINDING Parsing MSSQL view definitions for lineage information, I found that this common syntax is a source of parser errors: ``` from sqlglot import parse_one, exp # find all tables (x, y, z) print(repr(parse_one("CREATE VIEW a.b WITH SCHEMABINDING AS SELECT * FROM x JOIN y JOIN z"))) ``` Removing the "WITH SCHEMABINDING" element makes it parse correctly. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-view-transact-sql?view=sql-server-ver16 The library is already finding the WITH token and looking for properties, but it does not expect the SCHEMABINDING property at that point. Adding it to the property parsing is pretty simple (I'm happy to submit a PR plus tests), and with that change the statement is parsed properly.
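The fix parses `ENCRYPTION`, `SCHEMABINDING` and `VIEW_METADATA` into a new `ViewAttributeProperty`. A quick check based on the identity test added in this record:

```python
from sqlglot import exp, parse_one

sql = "CREATE VIEW a.b WITH SCHEMABINDING AS SELECT * FROM x"
ast = parse_one(sql, read="tsql")
assert ast.find(exp.ViewAttributeProperty) is not None
assert ast.sql(dialect="tsql") == sql  # the attribute survives the round-trip
```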
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_redshift.py::TestRedshift::test_redshift", "tests/test_tokens.py::TestTokens::test_crlf", "tests/test_transpile.py::TestTranspile::test_leading_comma" ]
[ "tests/dialects/test_redshift.py::TestRedshift::test_column_unnesting", "tests/dialects/test_redshift.py::TestRedshift::test_create_table_like", "tests/dialects/test_redshift.py::TestRedshift::test_identity", "tests/dialects/test_redshift.py::TestRedshift::test_no_schema_binding", "tests/dialects/test_redshift.py::TestRedshift::test_rename_table", "tests/dialects/test_redshift.py::TestRedshift::test_values", "tests/dialects/test_redshift.py::TestRedshift::test_varchar_max", "tests/dialects/test_tsql.py::TestTSQL::test__types_ints", "tests/dialects/test_tsql.py::TestTSQL::test_add_date", "tests/dialects/test_tsql.py::TestTSQL::test_charindex", "tests/dialects/test_tsql.py::TestTSQL::test_commit", "tests/dialects/test_tsql.py::TestTSQL::test_convert", "tests/dialects/test_tsql.py::TestTSQL::test_current_user", "tests/dialects/test_tsql.py::TestTSQL::test_date_diff", "tests/dialects/test_tsql.py::TestTSQL::test_datefromparts", "tests/dialects/test_tsql.py::TestTSQL::test_datename", "tests/dialects/test_tsql.py::TestTSQL::test_datepart", "tests/dialects/test_tsql.py::TestTSQL::test_ddl", "tests/dialects/test_tsql.py::TestTSQL::test_eomonth", "tests/dialects/test_tsql.py::TestTSQL::test_format", "tests/dialects/test_tsql.py::TestTSQL::test_fullproc", "tests/dialects/test_tsql.py::TestTSQL::test_hints", "tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes", "tests/dialects/test_tsql.py::TestTSQL::test_insert_cte", "tests/dialects/test_tsql.py::TestTSQL::test_isnull", "tests/dialects/test_tsql.py::TestTSQL::test_json", "tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery", "tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function", "tests/dialects/test_tsql.py::TestTSQL::test_len", "tests/dialects/test_tsql.py::TestTSQL::test_openjson", "tests/dialects/test_tsql.py::TestTSQL::test_option", "tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords", "tests/dialects/test_tsql.py::TestTSQL::test_qualify_derived_table_outputs", "tests/dialects/test_tsql.py::TestTSQL::test_replicate", "tests/dialects/test_tsql.py::TestTSQL::test_rollback", "tests/dialects/test_tsql.py::TestTSQL::test_set", "tests/dialects/test_tsql.py::TestTSQL::test_string", "tests/dialects/test_tsql.py::TestTSQL::test_system_time", "tests/dialects/test_tsql.py::TestTSQL::test_temp_table", "tests/dialects/test_tsql.py::TestTSQL::test_temporal_table", "tests/dialects/test_tsql.py::TestTSQL::test_top", "tests/dialects/test_tsql.py::TestTSQL::test_transaction", "tests/dialects/test_tsql.py::TestTSQL::test_tsql", "tests/dialects/test_tsql.py::TestTSQL::test_types", "tests/dialects/test_tsql.py::TestTSQL::test_types_bin", "tests/dialects/test_tsql.py::TestTSQL::test_types_date", "tests/dialects/test_tsql.py::TestTSQL::test_types_decimals", "tests/dialects/test_tsql.py::TestTSQL::test_types_string", "tests/dialects/test_tsql.py::TestTSQL::test_udf", "tests/test_tokens.py::TestTokens::test_command", "tests/test_tokens.py::TestTokens::test_comment_attachment", "tests/test_tokens.py::TestTokens::test_error_msg", "tests/test_tokens.py::TestTokens::test_jinja", "tests/test_tokens.py::TestTokens::test_space_keywords", "tests/test_tokens.py::TestTokens::test_token_line_col", "tests/test_transpile.py::TestTranspile::test_alias", "tests/test_transpile.py::TestTranspile::test_alter", "tests/test_transpile.py::TestTranspile::test_command_identity", "tests/test_transpile.py::TestTranspile::test_comments", "tests/test_transpile.py::TestTranspile::test_error_level", 
"tests/test_transpile.py::TestTranspile::test_extract", "tests/test_transpile.py::TestTranspile::test_identify_lambda", "tests/test_transpile.py::TestTranspile::test_identity", "tests/test_transpile.py::TestTranspile::test_if", "tests/test_transpile.py::TestTranspile::test_index_offset", "tests/test_transpile.py::TestTranspile::test_normalize_name", "tests/test_transpile.py::TestTranspile::test_not_range", "tests/test_transpile.py::TestTranspile::test_paren", "tests/test_transpile.py::TestTranspile::test_partial", "tests/test_transpile.py::TestTranspile::test_pretty", "tests/test_transpile.py::TestTranspile::test_pretty_line_breaks", "tests/test_transpile.py::TestTranspile::test_recursion", "tests/test_transpile.py::TestTranspile::test_some", "tests/test_transpile.py::TestTranspile::test_space", "tests/test_transpile.py::TestTranspile::test_time", "tests/test_transpile.py::TestTranspile::test_types", "tests/test_transpile.py::TestTranspile::test_unary", "tests/test_transpile.py::TestTranspile::test_unsupported_level", "tests/test_transpile.py::TestTranspile::test_weird_chars", "tests/test_transpile.py::TestTranspile::test_with" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-03-22T16:17:57Z"
mit
tobymao__sqlglot-3204
diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py index a41b6ea8..70066677 100644 --- a/sqlglot/dialects/redshift.py +++ b/sqlglot/dialects/redshift.py @@ -171,6 +171,8 @@ class Redshift(Postgres): ), exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})", + exp.StartsWith: lambda self, + e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'", exp.TableSample: no_tablesample_sql, exp.TsOrDsAdd: date_delta_sql("DATEADD"), exp.TsOrDsDiff: date_delta_sql("DATEDIFF"), diff --git a/sqlglot/generator.py b/sqlglot/generator.py index 804df019..3186becf 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -46,9 +46,11 @@ class Generator(metaclass=_Generator): 'safe': Only quote identifiers that are case insensitive. normalize: Whether to normalize identifiers to lowercase. Default: False. - pad: The pad size in a formatted string. + pad: The pad size in a formatted string. For example, this affects the indentation of + a projection in a query, relative to its nesting level. Default: 2. - indent: The indentation size in a formatted string. + indent: The indentation size in a formatted string. For example, this affects the + indentation of subqueries and filters under a `WHERE` clause. Default: 2. normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. @@ -3221,7 +3223,7 @@ class Generator(metaclass=_Generator): num_sqls = len(expressions) # These are calculated once in case we have the leading_comma / pretty option set, correspondingly - pad = " " * self.pad + pad = " " * len(sep) stripped_sep = sep.strip() result_sqls = [] diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py index 824ebe74..1ba8b2ad 100644 --- a/sqlglot/tokens.py +++ b/sqlglot/tokens.py @@ -565,8 +565,7 @@ class Tokenizer(metaclass=_Tokenizer): "~": TokenType.TILDA, "?": TokenType.PLACEHOLDER, "@": TokenType.PARAMETER, - # used for breaking a var like x'y' but nothing else - # the token type doesn't matter + # Used for breaking a var like x'y' but nothing else the token type doesn't matter "'": TokenType.QUOTE, "`": TokenType.IDENTIFIER, '"': TokenType.IDENTIFIER, @@ -892,7 +891,7 @@ class Tokenizer(metaclass=_Tokenizer): COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN} - # handle numeric literals like in hive (3L = BIGINT) + # Handle numeric literals like in hive (3L = BIGINT) NUMERIC_LITERALS: t.Dict[str, str] = {} COMMENTS = ["--", ("/*", "*/")] @@ -965,8 +964,7 @@ class Tokenizer(metaclass=_Tokenizer): while self.size and not self._end: current = self._current - # skip spaces inline rather than iteratively call advance() - # for performance reasons + # Skip spaces here rather than iteratively calling advance() for performance reasons while current < self.size: char = self.sql[current] @@ -975,12 +973,10 @@ class Tokenizer(metaclass=_Tokenizer): else: break - n = current - self._current - self._start = current - self._advance(n if n > 1 else 1) + offset = current - self._current if current > self._current else 1 - if self._char is None: - break + self._start = current + self._advance(offset) if not self._char.isspace(): if self._char.isdigit(): @@ -1008,12 +1004,9 @@ class Tokenizer(metaclass=_Tokenizer): def _advance(self, i: int = 1, alnum: bool = False) -> None: if self.WHITE_SPACE.get(self._char) is TokenType.BREAK: # Ensures we don't count an extra line if we get a \r\n line break sequence - if self._char == "\r" and self._peek == 
"\n": - i = 2 - self._start += 1 - - self._col = 1 - self._line += 1 + if not (self._char == "\r" and self._peek == "\n"): + self._col = 1 + self._line += 1 else: self._col += i diff --git a/sqlglotrs/src/tokenizer.rs b/sqlglotrs/src/tokenizer.rs index 2c90a650..881417e5 100644 --- a/sqlglotrs/src/tokenizer.rs +++ b/sqlglotrs/src/tokenizer.rs @@ -118,8 +118,27 @@ impl<'a> TokenizerState<'a> { fn scan(&mut self, until_peek_char: Option<char>) -> Result<(), TokenizerError> { while self.size > 0 && !self.is_end { - self.start = self.current; - self.advance(1)?; + let mut current = self.current; + + // Skip spaces here rather than iteratively calling advance() for performance reasons + while current < self.size { + let ch = self.char_at(current)?; + + if ch == ' ' || ch == '\t' { + current += 1; + } else { + break; + } + } + + let offset = if current > self.current { + current - self.current + } else { + 1 + }; + + self.start = current; + self.advance(offset as isize)?; if self.current_char == '\0' { break; @@ -153,16 +172,12 @@ impl<'a> TokenizerState<'a> { } fn advance(&mut self, i: isize) -> Result<(), TokenizerError> { - let mut i = i; if Some(&self.token_types.break_) == self.settings.white_space.get(&self.current_char) { // Ensures we don't count an extra line if we get a \r\n line break sequence. - if self.current_char == '\r' && self.peek_char == '\n' { - i = 2; - self.start += 1; + if ! (self.current_char == '\r' && self.peek_char == '\n') { + self.column = 1; + self.line += 1; } - - self.column = 1; - self.line += 1; } else { self.column = self.column.wrapping_add_signed(i); }
tobymao/sqlglot
3620b9974c28df7d4d189ebd5fdcb675f41a275d
diff --git a/tests/dialects/test_redshift.py b/tests/dialects/test_redshift.py index 896ee451..7affe31f 100644 --- a/tests/dialects/test_redshift.py +++ b/tests/dialects/test_redshift.py @@ -139,6 +139,15 @@ class TestRedshift(Validator): "presto": "LENGTH(x)", }, ) + self.validate_all( + "x LIKE 'abc' || '%'", + read={ + "duckdb": "STARTS_WITH(x, 'abc')", + }, + write={ + "redshift": "x LIKE 'abc' || '%'", + }, + ) self.validate_all( "SELECT SYSDATE", diff --git a/tests/test_tokens.py b/tests/test_tokens.py index 970c1ac2..29ef5b61 100644 --- a/tests/test_tokens.py +++ b/tests/test_tokens.py @@ -85,6 +85,18 @@ x""" ], ) + for simple_query in ("SELECT 1\r\n", "\r\nSELECT 1"): + tokens = Tokenizer().tokenize(simple_query) + tokens = [(token.token_type, token.text) for token in tokens] + + self.assertEqual( + tokens, + [ + (TokenType.SELECT, "SELECT"), + (TokenType.NUMBER, "1"), + ], + ) + def test_command(self): tokens = Tokenizer().tokenize("SHOW;") self.assertEqual(tokens[0].token_type, TokenType.SHOW) diff --git a/tests/test_transpile.py b/tests/test_transpile.py index 0170e230..f6fd2f9a 100644 --- a/tests/test_transpile.py +++ b/tests/test_transpile.py @@ -66,6 +66,24 @@ class TestTranspile(unittest.TestCase): ) def test_leading_comma(self): + self.validate( + "SELECT a, b, c FROM (SELECT a, b, c FROM t)", + "SELECT\n" + " a\n" + " , b\n" + " , c\n" + "FROM (\n" + " SELECT\n" + " a\n" + " , b\n" + " , c\n" + " FROM t\n" + ")", + leading_comma=True, + pretty=True, + pad=4, + indent=4, + ) self.validate( "SELECT FOO, BAR, BAZ", "SELECT\n FOO\n , BAR\n , BAZ",
Windows line endings cause IndexOutOfRange during tokenization I am parsing view and stored procedure definitions from Windows-based MSSQL. I am getting failures from IndexOutOfRange, and strange parsing contexts where the leading character has been removed. I think I have identified the problem and a solution; are you open to a PR (with unit test)? The issue is this. In _advance (https://github.com/tobymao/sqlglot/blob/a18444dbd7ccfc05b189dcb2005c85a1048cc8de/sqlglot/tokens.py#L1008): if it sees \r\n, it sets i to 2; _current is increased by i; and _char always looks at position _current - 1. This can lead to it looking past the end of the source text, as _current - 1 > size. I believe the fix is something like changing https://github.com/tobymao/sqlglot/blob/a18444dbd7ccfc05b189dcb2005c85a1048cc8de/sqlglot/tokens.py#L1022 to ``` if self._end: self._peek = "" self._char = self.sql[self.size - 1] else: self._char = self.sql[self._current - 1] self._peek = self.sql[self._current] ```
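Note that the fix ultimately landed in the tokenizer's whitespace and line-break handling (see the diff above) rather than via the snippet proposed in the issue. The new test exercises both a trailing and a leading `\r\n`:

```python
from sqlglot.tokens import Tokenizer

for sql in ("SELECT 1\r\n", "\r\nSELECT 1"):
    tokens = Tokenizer().tokenize(sql)
    assert [t.text for t in tokens] == ["SELECT", "1"]  # no out-of-range access
```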
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_redshift.py::TestRedshift::test_redshift", "tests/test_tokens.py::TestTokens::test_crlf", "tests/test_transpile.py::TestTranspile::test_leading_comma" ]
[ "tests/dialects/test_redshift.py::TestRedshift::test_column_unnesting", "tests/dialects/test_redshift.py::TestRedshift::test_create_table_like", "tests/dialects/test_redshift.py::TestRedshift::test_identity", "tests/dialects/test_redshift.py::TestRedshift::test_no_schema_binding", "tests/dialects/test_redshift.py::TestRedshift::test_rename_table", "tests/dialects/test_redshift.py::TestRedshift::test_values", "tests/dialects/test_redshift.py::TestRedshift::test_varchar_max", "tests/test_tokens.py::TestTokens::test_command", "tests/test_tokens.py::TestTokens::test_comment_attachment", "tests/test_tokens.py::TestTokens::test_error_msg", "tests/test_tokens.py::TestTokens::test_jinja", "tests/test_tokens.py::TestTokens::test_space_keywords", "tests/test_tokens.py::TestTokens::test_token_line_col", "tests/test_transpile.py::TestTranspile::test_alias", "tests/test_transpile.py::TestTranspile::test_alter", "tests/test_transpile.py::TestTranspile::test_command_identity", "tests/test_transpile.py::TestTranspile::test_comments", "tests/test_transpile.py::TestTranspile::test_error_level", "tests/test_transpile.py::TestTranspile::test_extract", "tests/test_transpile.py::TestTranspile::test_identify_lambda", "tests/test_transpile.py::TestTranspile::test_identity", "tests/test_transpile.py::TestTranspile::test_if", "tests/test_transpile.py::TestTranspile::test_index_offset", "tests/test_transpile.py::TestTranspile::test_normalize_name", "tests/test_transpile.py::TestTranspile::test_not_range", "tests/test_transpile.py::TestTranspile::test_paren", "tests/test_transpile.py::TestTranspile::test_partial", "tests/test_transpile.py::TestTranspile::test_pretty", "tests/test_transpile.py::TestTranspile::test_pretty_line_breaks", "tests/test_transpile.py::TestTranspile::test_recursion", "tests/test_transpile.py::TestTranspile::test_some", "tests/test_transpile.py::TestTranspile::test_space", "tests/test_transpile.py::TestTranspile::test_time", "tests/test_transpile.py::TestTranspile::test_types", "tests/test_transpile.py::TestTranspile::test_unary", "tests/test_transpile.py::TestTranspile::test_unsupported_level", "tests/test_transpile.py::TestTranspile::test_weird_chars", "tests/test_transpile.py::TestTranspile::test_with" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-03-22T17:58:41Z"
mit
tobymao__sqlglot-3223
diff --git a/sqlglot/dialects/mysql.py b/sqlglot/dialects/mysql.py index 4ea89b21..aef2a759 100644 --- a/sqlglot/dialects/mysql.py +++ b/sqlglot/dialects/mysql.py @@ -291,6 +291,7 @@ class MySQL(Dialect): "DAYOFWEEK": lambda args: exp.DayOfWeek(this=exp.TsOrDsToDate(this=seq_get(args, 0))), "DAYOFYEAR": lambda args: exp.DayOfYear(this=exp.TsOrDsToDate(this=seq_get(args, 0))), "INSTR": lambda args: exp.StrPosition(substr=seq_get(args, 1), this=seq_get(args, 0)), + "FROM_UNIXTIME": build_formatted_time(exp.UnixToTime, "mysql"), "ISNULL": isnull_to_is_null, "LOCATE": locate_to_strposition, "MAKETIME": exp.TimeFromParts.from_arg_list, @@ -720,6 +721,7 @@ class MySQL(Dialect): exp.TsOrDsAdd: _date_add_sql("ADD"), exp.TsOrDsDiff: lambda self, e: self.func("DATEDIFF", e.this, e.expression), exp.TsOrDsToDate: _ts_or_ds_to_date_sql, + exp.UnixToTime: lambda self, e: self.func("FROM_UNIXTIME", e.this, self.format_time(e)), exp.Week: _remove_ts_or_ds_to_date(), exp.WeekOfYear: _remove_ts_or_ds_to_date(rename_func("WEEKOFYEAR")), exp.Year: _remove_ts_or_ds_to_date(), diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py index 70066677..1f0c411e 100644 --- a/sqlglot/dialects/redshift.py +++ b/sqlglot/dialects/redshift.py @@ -176,6 +176,8 @@ class Redshift(Postgres): exp.TableSample: no_tablesample_sql, exp.TsOrDsAdd: date_delta_sql("DATEADD"), exp.TsOrDsDiff: date_delta_sql("DATEDIFF"), + exp.UnixToTime: lambda self, + e: f"(TIMESTAMP 'epoch' + {self.sql(e.this)} * INTERVAL '1 SECOND')", } # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index 0cbaf20e..2ec0c3f2 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -5707,7 +5707,14 @@ class UnixToStr(Func): # https://prestodb.io/docs/current/functions/datetime.html # presto has weird zone/hours/minutes class UnixToTime(Func): - arg_types = {"this": True, "scale": False, "zone": False, "hours": False, "minutes": False} + arg_types = { + "this": True, + "scale": False, + "zone": False, + "hours": False, + "minutes": False, + "format": False, + } SECONDS = Literal.number(0) DECIS = Literal.number(1)
tobymao/sqlglot
e7c91584ac7fb35082ebd1d4873f13307ea848af
diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py index 23607da6..49552bf5 100644 --- a/tests/dialects/test_mysql.py +++ b/tests/dialects/test_mysql.py @@ -513,9 +513,8 @@ class TestMySQL(Validator): ) def test_mysql_time(self): - self.validate_identity("FROM_UNIXTIME(a, b)") - self.validate_identity("FROM_UNIXTIME(a, b, c)") self.validate_identity("TIME_STR_TO_UNIX(x)", "UNIX_TIMESTAMP(x)") + self.validate_identity("SELECT FROM_UNIXTIME(1711366265, '%Y %D %M')") self.validate_all( "SELECT TO_DAYS(x)", write={ @@ -581,6 +580,17 @@ class TestMySQL(Validator): self.validate_all( "STR_TO_DATE(x, '%Y-%m-%dT%T')", write={"presto": "DATE_PARSE(x, '%Y-%m-%dT%T')"} ) + self.validate_all( + "SELECT FROM_UNIXTIME(col)", + read={ + "postgres": "SELECT TO_TIMESTAMP(col)", + }, + write={ + "mysql": "SELECT FROM_UNIXTIME(col)", + "postgres": "SELECT TO_TIMESTAMP(col)", + "redshift": "SELECT (TIMESTAMP 'epoch' + col * INTERVAL '1 SECOND')", + }, + ) def test_mysql(self): self.validate_all(
function from_unixtime transpile error source code: from sqlglot import transpile print(transpile("select from_unixtime(1711366265)", read="mysql", write="postgres")) print(transpile("select from_unixtime(1711366265)", read="mysql", write="redshift")) // output ['SELECT FROM_UNIXTIME(1711366265)'] ['SELECT FROM_UNIXTIME(1711366265)'] but postgres and redshift have no from_unixtime function, so both outputs raise an error when executed
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_mysql.py::TestMySQL::test_mysql_time" ]
[ "tests/dialects/test_mysql.py::TestMySQL::test_bits_literal", "tests/dialects/test_mysql.py::TestMySQL::test_canonical_functions", "tests/dialects/test_mysql.py::TestMySQL::test_convert", "tests/dialects/test_mysql.py::TestMySQL::test_date_format", "tests/dialects/test_mysql.py::TestMySQL::test_ddl", "tests/dialects/test_mysql.py::TestMySQL::test_escape", "tests/dialects/test_mysql.py::TestMySQL::test_hexadecimal_literal", "tests/dialects/test_mysql.py::TestMySQL::test_identity", "tests/dialects/test_mysql.py::TestMySQL::test_introducers", "tests/dialects/test_mysql.py::TestMySQL::test_is_null", "tests/dialects/test_mysql.py::TestMySQL::test_json_object", "tests/dialects/test_mysql.py::TestMySQL::test_match_against", "tests/dialects/test_mysql.py::TestMySQL::test_monthname", "tests/dialects/test_mysql.py::TestMySQL::test_mysql", "tests/dialects/test_mysql.py::TestMySQL::test_safe_div", "tests/dialects/test_mysql.py::TestMySQL::test_set_variable", "tests/dialects/test_mysql.py::TestMySQL::test_show_columns", "tests/dialects/test_mysql.py::TestMySQL::test_show_db_like_or_where_sql", "tests/dialects/test_mysql.py::TestMySQL::test_show_engine", "tests/dialects/test_mysql.py::TestMySQL::test_show_errors", "tests/dialects/test_mysql.py::TestMySQL::test_show_events", "tests/dialects/test_mysql.py::TestMySQL::test_show_grants", "tests/dialects/test_mysql.py::TestMySQL::test_show_index", "tests/dialects/test_mysql.py::TestMySQL::test_show_like_or_where", "tests/dialects/test_mysql.py::TestMySQL::test_show_name", "tests/dialects/test_mysql.py::TestMySQL::test_show_processlist", "tests/dialects/test_mysql.py::TestMySQL::test_show_profile", "tests/dialects/test_mysql.py::TestMySQL::test_show_replica_status", "tests/dialects/test_mysql.py::TestMySQL::test_show_simple", "tests/dialects/test_mysql.py::TestMySQL::test_show_tables", "tests/dialects/test_mysql.py::TestMySQL::test_string_literals", "tests/dialects/test_mysql.py::TestMySQL::test_types" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-03-26T10:44:21Z"
mit
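A quick way to exercise the sqlglot-3223 fix recorded above is to transpile the reported query into each target dialect. The expected strings are lifted directly from the record's test_patch, so this is a sketch that assumes a sqlglot build containing the patch:

```python
import sqlglot

# Expected outputs come from the record's test_patch and only hold once
# the FROM_UNIXTIME patch above is applied.
assert sqlglot.transpile(
    "SELECT FROM_UNIXTIME(col)", read="mysql", write="postgres"
) == ["SELECT TO_TIMESTAMP(col)"]
assert sqlglot.transpile(
    "SELECT FROM_UNIXTIME(col)", read="mysql", write="redshift"
) == ["SELECT (TIMESTAMP 'epoch' + col * INTERVAL '1 SECOND')"]
```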
tobymao__sqlglot-3326
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index 5adbb1e5..d97807a3 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -2063,6 +2063,7 @@ class Insert(DDL, DML): "where": False, "ignore": False, "by_name": False, + "stored": False, } def with_( diff --git a/sqlglot/generator.py b/sqlglot/generator.py index b7da18b3..23b8d9c6 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -1520,6 +1520,8 @@ class Generator(metaclass=_Generator): else: this = self.INSERT_OVERWRITE if overwrite else " INTO" + stored = self.sql(expression, "stored") + stored = f" {stored}" if stored else "" alternative = expression.args.get("alternative") alternative = f" OR {alternative}" if alternative else "" ignore = " IGNORE" if expression.args.get("ignore") else "" @@ -1545,7 +1547,7 @@ class Generator(metaclass=_Generator): else: expression_sql = f"{returning}{expression_sql}{on_conflict}" - sql = f"INSERT{hint}{alternative}{ignore}{this}{by_name}{exists}{partition_sql}{where}{expression_sql}" + sql = f"INSERT{hint}{alternative}{ignore}{this}{stored}{by_name}{exists}{partition_sql}{where}{expression_sql}" return self.prepend_ctes(expression, sql) def intersect_sql(self, expression: exp.Intersect) -> str: diff --git a/sqlglot/parser.py b/sqlglot/parser.py index 2aaba600..9c075dc7 100644 --- a/sqlglot/parser.py +++ b/sqlglot/parser.py @@ -2248,6 +2248,7 @@ class Parser(metaclass=_Parser): hint=hint, is_function=is_function, this=this, + stored=self._match_text_seq("STORED") and self._parse_stored(), by_name=self._match_text_seq("BY", "NAME"), exists=self._parse_exists(), partition=self._parse_partition(),
tobymao/sqlglot
83cff79633225fe3d8606ec3a5a9e8c1081edd0c
diff --git a/tests/dialects/test_hive.py b/tests/dialects/test_hive.py index 33294ee0..d52510d2 100644 --- a/tests/dialects/test_hive.py +++ b/tests/dialects/test_hive.py @@ -428,6 +428,9 @@ class TestHive(Validator): self.validate_identity( "INSERT OVERWRITE TABLE zipcodes PARTITION(state = 0) VALUES (896, 'US', 'TAMPA', 33607)" ) + self.validate_identity( + "INSERT OVERWRITE DIRECTORY 'x' ROW FORMAT DELIMITED FIELDS TERMINATED BY '\001' COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':' LINES TERMINATED BY '' STORED AS TEXTFILE SELECT * FROM `a`.`b`" + ) self.validate_identity( "SELECT a, b, SUM(c) FROM tabl AS t GROUP BY a, b, GROUPING SETS ((a, b), a)" )
Parsing of insert overwrite directory fails in hive dialect **Before you file an issue** [x] Make sure you specify the "read" dialect eg. `parse_one(sql, read="spark")` [x] Make sure you specify the "write" dialect eg. `ast.sql(dialect="duckdb")` [x] Check if the issue still exists on main **Fully reproducible code snippet** ``` q = """INSERT OVERWRITE DIRECTORY 's3a://path' ROW FORMAT DELIMITED FIELDS TERMINATED BY '\001' COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':' LINES TERMINATED BY ' ' STORED AS TEXTFILE SELECT * FROM `a`.`b`""" asts = sqlglot.parse(q, dialect='hive') ``` Exception: ``` File /opt/miniconda3/envs/databricks-repos/lib/python3.10/site-packages/sqlglot/parser.py:1170, in Parser.parse(self, raw_tokens, sql) 1156 def parse( 1157 self, raw_tokens: t.List[Token], sql: t.Optional[str] = None 1158 ) -> t.List[t.Optional[exp.Expression]]: 1159 """ 1160 Parses a list of tokens and returns a list of syntax trees, one tree 1161 per parsed SQL statement. (...) 1168 The list of the produced syntax trees. 1169 """ -> 1170 return self._parse( 1171 parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql 1172 ) File /opt/miniconda3/envs/databricks-repos/lib/python3.10/site-packages/sqlglot/parser.py:1239, in Parser._parse(self, parse_method, raw_tokens, sql) 1236 expressions.append(parse_method(self)) 1238 if self._index < len(self._tokens): -> 1239 self.raise_error("Invalid expression / Unexpected token") 1241 self.check_errors() 1243 return expressions File /opt/miniconda3/envs/databricks-repos/lib/python3.10/site-packages/sqlglot/parser.py:1280, in Parser.raise_error(self, message, token) 1268 error = ParseError.new( 1269 f"{message}. Line {token.line}, Col: {token.col}.\n" 1270 f" {start_context}\033[4m{highlight}\033[0m{end_context}", (...) 1276 end_context=end_context, 1277 ) 1279 if self.error_level == ErrorLevel.IMMEDIATE: -> 1280 raise error 1282 self.errors.append(error) ParseError: Invalid expression / Unexpected token. Line 4, Col: 6. INATED BY '' COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':' LINES TERMINATED BY ' ' STORED AS TEXTFILE SELECT * FROM `a`.`b` ``` **Official Documentation** https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DML
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_hive.py::TestHive::test_hive" ]
[ "tests/dialects/test_hive.py::TestHive::test_bits", "tests/dialects/test_hive.py::TestHive::test_cast", "tests/dialects/test_hive.py::TestHive::test_data_type", "tests/dialects/test_hive.py::TestHive::test_ddl", "tests/dialects/test_hive.py::TestHive::test_escapes", "tests/dialects/test_hive.py::TestHive::test_lateral_view", "tests/dialects/test_hive.py::TestHive::test_order_by", "tests/dialects/test_hive.py::TestHive::test_quotes", "tests/dialects/test_hive.py::TestHive::test_regex", "tests/dialects/test_hive.py::TestHive::test_time" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-04-16T11:45:58Z"
mit
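The statement from the sqlglot-3326 record above should now round-trip through the Hive dialect. A sketch based on the identity added in the test_patch; the \001 and \n delimiter literals are assumptions, since the dump flattens control characters:

```python
from sqlglot import parse_one

# Round-trip check for the INSERT OVERWRITE DIRECTORY ... STORED AS form
# that previously raised "Invalid expression / Unexpected token".
sql = (
    "INSERT OVERWRITE DIRECTORY 'x' "
    "ROW FORMAT DELIMITED FIELDS TERMINATED BY '\001' "
    "COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':' "
    "LINES TERMINATED BY '\n' STORED AS TEXTFILE "
    "SELECT * FROM `a`.`b`"
)
ast = parse_one(sql, read="hive")
print(ast.sql(dialect="hive"))  # expected: the statement round-trips
```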
tobymao__sqlglot-3360
diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py index 71339b88..2e53a675 100644 --- a/sqlglot/dialects/postgres.py +++ b/sqlglot/dialects/postgres.py @@ -518,6 +518,7 @@ class Postgres(Dialect): exp.Variance: rename_func("VAR_SAMP"), exp.Xor: bool_xor_sql, } + TRANSFORMS.pop(exp.CommentColumnConstraint) PROPERTIES_LOCATION = { **generator.Generator.PROPERTIES_LOCATION, @@ -526,6 +527,10 @@ class Postgres(Dialect): exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, } + def commentcolumnconstraint_sql(self, expression: exp.CommentColumnConstraint) -> str: + self.unsupported("Column comments are not supported in the CREATE statement") + return "" + def unnest_sql(self, expression: exp.Unnest) -> str: if len(expression.expressions) == 1: from sqlglot.optimizer.annotate_types import annotate_types diff --git a/sqlglot/lineage.py b/sqlglot/lineage.py index c91bb36e..f4a3dec5 100644 --- a/sqlglot/lineage.py +++ b/sqlglot/lineage.py @@ -129,12 +129,6 @@ def to_node( reference_node_name: t.Optional[str] = None, trim_selects: bool = True, ) -> Node: - source_names = { - dt.alias: dt.comments[0].split()[1] - for dt in scope.derived_tables - if dt.comments and dt.comments[0].startswith("source: ") - } - # Find the specific select clause that is the source of the column we want. # This can either be a specific, named select or a generic `*` clause. select = ( @@ -242,6 +236,19 @@ def to_node( # If the source is a UDTF find columns used in the UTDF to generate the table if isinstance(source, exp.UDTF): source_columns |= set(source.find_all(exp.Column)) + derived_tables = [ + source.expression.parent + for source in scope.sources.values() + if isinstance(source, Scope) and source.is_derived_table + ] + else: + derived_tables = scope.derived_tables + + source_names = { + dt.alias: dt.comments[0].split()[1] + for dt in derived_tables + if dt.comments and dt.comments[0].startswith("source: ") + } for c in source_columns: table = c.table diff --git a/sqlglot/optimizer/__init__.py b/sqlglot/optimizer/__init__.py index 34ea6cb1..050f246c 100644 --- a/sqlglot/optimizer/__init__.py +++ b/sqlglot/optimizer/__init__.py @@ -1,11 +1,11 @@ # ruff: noqa: F401 -from sqlglot.optimizer.optimizer import RULES, optimize +from sqlglot.optimizer.optimizer import RULES as RULES, optimize as optimize from sqlglot.optimizer.scope import ( - Scope, - build_scope, - find_all_in_scope, - find_in_scope, - traverse_scope, - walk_in_scope, + Scope as Scope, + build_scope as build_scope, + find_all_in_scope as find_all_in_scope, + find_in_scope as find_in_scope, + traverse_scope as traverse_scope, + walk_in_scope as walk_in_scope, )
tobymao/sqlglot
f697cb16b6d744253febb2f83476853e63e06f88
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py index 5a55a7d6..1ed7d82f 100644 --- a/tests/dialects/test_postgres.py +++ b/tests/dialects/test_postgres.py @@ -314,6 +314,12 @@ class TestPostgres(Validator): ) self.validate_identity("SELECT * FROM t1*", "SELECT * FROM t1") + self.validate_all( + "CREATE TABLE t (c INT)", + read={ + "mysql": "CREATE TABLE t (c INT COMMENT 'comment')", + }, + ) self.validate_all( 'SELECT * FROM "test_table" ORDER BY RANDOM() LIMIT 5', write={ diff --git a/tests/test_lineage.py b/tests/test_lineage.py index c782d9ae..cbadf7bb 100644 --- a/tests/test_lineage.py +++ b/tests/test_lineage.py @@ -224,16 +224,50 @@ class TestLineage(unittest.TestCase): downstream.source.sql(dialect="snowflake"), "LATERAL FLATTEN(INPUT => TEST_TABLE.RESULT, OUTER => TRUE) AS FLATTENED(SEQ, KEY, PATH, INDEX, VALUE, THIS)", ) - self.assertEqual( - downstream.expression.sql(dialect="snowflake"), - "VALUE", - ) + self.assertEqual(downstream.expression.sql(dialect="snowflake"), "VALUE") self.assertEqual(len(downstream.downstream), 1) downstream = downstream.downstream[0] self.assertEqual(downstream.name, "TEST_TABLE.RESULT") self.assertEqual(downstream.source.sql(dialect="snowflake"), "TEST_TABLE AS TEST_TABLE") + node = lineage( + "FIELD", + "SELECT FLATTENED.VALUE:field::text AS FIELD FROM SNOWFLAKE.SCHEMA.MODEL AS MODEL_ALIAS, LATERAL FLATTEN(INPUT => MODEL_ALIAS.A) AS FLATTENED", + schema={"SNOWFLAKE": {"SCHEMA": {"TABLE": {"A": "integer"}}}}, + sources={"SNOWFLAKE.SCHEMA.MODEL": "SELECT A FROM SNOWFLAKE.SCHEMA.TABLE"}, + dialect="snowflake", + ) + self.assertEqual(node.name, "FIELD") + + downstream = node.downstream[0] + self.assertEqual(downstream.name, "FLATTENED.VALUE") + self.assertEqual( + downstream.source.sql(dialect="snowflake"), + "LATERAL FLATTEN(INPUT => MODEL_ALIAS.A) AS FLATTENED(SEQ, KEY, PATH, INDEX, VALUE, THIS)", + ) + self.assertEqual(downstream.expression.sql(dialect="snowflake"), "VALUE") + self.assertEqual(len(downstream.downstream), 1) + + downstream = downstream.downstream[0] + self.assertEqual(downstream.name, "MODEL_ALIAS.A") + self.assertEqual(downstream.source_name, "SNOWFLAKE.SCHEMA.MODEL") + self.assertEqual( + downstream.source.sql(dialect="snowflake"), + "SELECT TABLE.A AS A FROM SNOWFLAKE.SCHEMA.TABLE AS TABLE", + ) + self.assertEqual(downstream.expression.sql(dialect="snowflake"), "TABLE.A AS A") + self.assertEqual(len(downstream.downstream), 1) + + downstream = downstream.downstream[0] + self.assertEqual(downstream.name, "TABLE.A") + self.assertEqual( + downstream.source.sql(dialect="snowflake"), "SNOWFLAKE.SCHEMA.TABLE AS TABLE" + ) + self.assertEqual( + downstream.expression.sql(dialect="snowflake"), "SNOWFLAKE.SCHEMA.TABLE AS TABLE" + ) + def test_subquery(self) -> None: node = lineage( "output",
No `source_name` in column lineage with `LATERAL FLATTEN` **Fully reproducible code snippet** ```python from sqlglot.lineage import lineage query = """SELECT FLATTENED.VALUE:field::text AS FIELD FROM SNOWFLAKE.SCHEMA.MODEL AS MODEL_ALIAS, LATERAL FLATTEN(INPUT => MODEL_ALIAS.A ) AS FLATTENED """ sources = {"SNOWFLAKE.SCHEMA.MODEL": "SELECT A FROM SNOWFLAKE.SCHEMA.TABLE"} schemas = { "SCHEMA": { "TABLE": {"A": "integer"}, } } result = lineage("FIELD", query, schemas, sources, dialect="snowflake") for node in result.walk(): print(f"Name: {node.name}, Source: {node.source_name}") ``` The output is: ``` Name: FIELD, Source: Name: FLATTENED.VALUE, Source: Name: MODEL_ALIAS.A, Source: Name: TABLE.A, Source: ``` I would expect the `MODEL_ALIAS.A` node to have `source_name` equal to `SNOWFLAKE.SCHEMA.MODEL` because that's the source (`sources` argument to the `lineage` function) where that column appears. That's how that field seems to work in other queries. For example, changing the query to: ```sql SELECT MODEL_ALIAS.A AS FIELD FROM SNOWFLAKE.SCHEMA.MODEL AS MODEL_ALIAS ``` gives: ``` Name: FIELD, Source: Name: MODEL_ALIAS.A, Source: SNOWFLAKE.SCHEMA.MODEL Name: TABLE.A, Source: ``` I believe the root cause is that the expanded-and-qualified query has a `source: ` comment after the first element in the `FROM` clause but not for the `LATERAL FLATTEN`.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_postgres.py::TestPostgres::test_postgres", "tests/test_lineage.py::TestLineage::test_lineage_lateral_flatten" ]
[ "tests/dialects/test_postgres.py::TestPostgres::test_array_offset", "tests/dialects/test_postgres.py::TestPostgres::test_bool_or", "tests/dialects/test_postgres.py::TestPostgres::test_ddl", "tests/dialects/test_postgres.py::TestPostgres::test_operator", "tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary", "tests/dialects/test_postgres.py::TestPostgres::test_string_concat", "tests/dialects/test_postgres.py::TestPostgres::test_unnest", "tests/dialects/test_postgres.py::TestPostgres::test_unnest_json_array", "tests/dialects/test_postgres.py::TestPostgres::test_variance", "tests/test_lineage.py::TestLineage::test_ddl_lineage", "tests/test_lineage.py::TestLineage::test_lineage", "tests/test_lineage.py::TestLineage::test_lineage_cte_name_appears_in_schema", "tests/test_lineage.py::TestLineage::test_lineage_cte_union", "tests/test_lineage.py::TestLineage::test_lineage_external_col", "tests/test_lineage.py::TestLineage::test_lineage_normalize", "tests/test_lineage.py::TestLineage::test_lineage_source_union", "tests/test_lineage.py::TestLineage::test_lineage_source_with_cte", "tests/test_lineage.py::TestLineage::test_lineage_source_with_star", "tests/test_lineage.py::TestLineage::test_lineage_sql_with_cte", "tests/test_lineage.py::TestLineage::test_lineage_union", "tests/test_lineage.py::TestLineage::test_lineage_values", "tests/test_lineage.py::TestLineage::test_select_star", "tests/test_lineage.py::TestLineage::test_subquery", "tests/test_lineage.py::TestLineage::test_trim", "tests/test_lineage.py::TestLineage::test_unnest" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2024-04-26T13:03:48Z"
mit
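To see the behavioral change the sqlglot-3360 record describes, re-run the reporter's lineage walk; after the derived-table fix, the `MODEL_ALIAS.A` node should report `SNOWFLAKE.SCHEMA.MODEL` as its `source_name`. A sketch mirroring the record's test_patch:

```python
from sqlglot.lineage import lineage

node = lineage(
    "FIELD",
    "SELECT FLATTENED.VALUE:field::text AS FIELD "
    "FROM SNOWFLAKE.SCHEMA.MODEL AS MODEL_ALIAS, "
    "LATERAL FLATTEN(INPUT => MODEL_ALIAS.A) AS FLATTENED",
    schema={"SNOWFLAKE": {"SCHEMA": {"TABLE": {"A": "integer"}}}},
    sources={"SNOWFLAKE.SCHEMA.MODEL": "SELECT A FROM SNOWFLAKE.SCHEMA.TABLE"},
    dialect="snowflake",
)
for n in node.walk():
    # MODEL_ALIAS.A should now print SNOWFLAKE.SCHEMA.MODEL as its source
    print(n.name, n.source_name)
```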
tobymao__sqlglot-3385
diff --git a/sqlglot/dialects/trino.py b/sqlglot/dialects/trino.py index 457e2f05..4b5f8e0d 100644 --- a/sqlglot/dialects/trino.py +++ b/sqlglot/dialects/trino.py @@ -1,7 +1,7 @@ from __future__ import annotations from sqlglot import exp -from sqlglot.dialects.dialect import merge_without_target_sql +from sqlglot.dialects.dialect import merge_without_target_sql, trim_sql from sqlglot.dialects.presto import Presto @@ -9,12 +9,19 @@ class Trino(Presto): SUPPORTS_USER_DEFINED_TYPES = False LOG_BASE_FIRST = True + class Parser(Presto.Parser): + FUNCTION_PARSERS = { + **Presto.Parser.FUNCTION_PARSERS, + "TRIM": lambda self: self._parse_trim(), + } + class Generator(Presto.Generator): TRANSFORMS = { **Presto.Generator.TRANSFORMS, exp.ArraySum: lambda self, e: f"REDUCE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)", exp.Merge: merge_without_target_sql, + exp.Trim: trim_sql, } SUPPORTED_JSON_PATH_PARTS = {
tobymao/sqlglot
f85b8e1017acb9d6b64489076a461d647076b419
diff --git a/tests/dialects/test_trino.py b/tests/dialects/test_trino.py new file mode 100644 index 00000000..ccc1407f --- /dev/null +++ b/tests/dialects/test_trino.py @@ -0,0 +1,18 @@ +from tests.dialects.test_dialect import Validator + + +class TestTrino(Validator): + dialect = "trino" + + def test_trim(self): + self.validate_identity("SELECT TRIM('!' FROM '!foo!')") + self.validate_identity("SELECT TRIM(BOTH '$' FROM '$var$')") + self.validate_identity("SELECT TRIM(TRAILING 'ER' FROM UPPER('worker'))") + self.validate_identity( + "SELECT TRIM(LEADING FROM ' abcd')", + "SELECT LTRIM(' abcd')", + ) + self.validate_identity( + "SELECT TRIM('!foo!', '!')", + "SELECT TRIM('!' FROM '!foo!')", + )
Can't parse `trim` in TrinoSQL **Fully reproducible code snippet** Please include a fully reproducible code snippet or the input sql, dialect, and expected output. ```python import sqlglot print(sqlglot.__version__) sql = "SELECT trim(',' FROM some_col);" result = sqlglot.parse(sql, read="trino") print(repr(result)) ``` Expected: ``` 23.12.2 [Select( expressions=[ Trim( this=Column( this=Identifier(this=some_col, quoted=False)), expression=Literal(this=,, is_string=True))])] ``` Got: ``` 23.12.2 Traceback (most recent call last): File "proof.py", line 7, in <module> result = sqlglot.parse(sql, read="trino") File ".../python3.8/site-packages/sqlglot/__init__.py", line 102, in parse return Dialect.get_or_raise(read or dialect).parse(sql, **opts) File ".../python3.8/site-packages/sqlglot/dialects/dialect.py", line 506, in parse return self.parser(**opts).parse(self.tokenize(sql), sql) File ".../python3.8/site-packages/sqlglot/parser.py", line 1175, in parse return self._parse( File ".../python3.8/site-packages/sqlglot/parser.py", line 1241, in _parse expressions.append(parse_method(self)) File ".../python3.8/site-packages/sqlglot/parser.py", line 1476, in _parse_statement expression = self._parse_set_operations(expression) if expression else self._parse_select() File ".../python3.8/site-packages/sqlglot/parser.py", line 2532, in _parse_select projections = self._parse_projections() File ".../python3.8/site-packages/sqlglot/parser.py", line 2480, in _parse_projections return self._parse_expressions() File ".../python3.8/site-packages/sqlglot/parser.py", line 5695, in _parse_expressions return self._parse_csv(self._parse_expression) File ".../python3.8/site-packages/sqlglot/parser.py", line 5649, in _parse_csv parse_result = parse_method() File ".../python3.8/site-packages/sqlglot/parser.py", line 3805, in _parse_expression return self._parse_alias(self._parse_conjunction()) File ".../python3.8/site-packages/sqlglot/parser.py", line 3808, in _parse_conjunction return self._parse_tokens(self._parse_equality, self.CONJUNCTION) File ".../python3.8/site-packages/sqlglot/parser.py", line 5663, in _parse_tokens this = parse_method() File ".../python3.8/site-packages/sqlglot/parser.py", line 3811, in _parse_equality return self._parse_tokens(self._parse_comparison, self.EQUALITY) File ".../python3.8/site-packages/sqlglot/parser.py", line 5663, in _parse_tokens this = parse_method() File ".../python3.8/site-packages/sqlglot/parser.py", line 3814, in _parse_comparison return self._parse_tokens(self._parse_range, self.COMPARISON) File ".../python3.8/site-packages/sqlglot/parser.py", line 5663, in _parse_tokens this = parse_method() File ".../python3.8/site-packages/sqlglot/parser.py", line 3817, in _parse_range this = this or self._parse_bitwise() File ".../python3.8/site-packages/sqlglot/parser.py", line 3941, in _parse_bitwise this = self._parse_term() File ".../python3.8/site-packages/sqlglot/parser.py", line 3973, in _parse_term return self._parse_tokens(self._parse_factor, self.TERM) File ".../python3.8/site-packages/sqlglot/parser.py", line 5663, in _parse_tokens this = parse_method() File ".../python3.8/site-packages/sqlglot/parser.py", line 3977, in _parse_factor this = parse_method() File ".../python3.8/site-packages/sqlglot/parser.py", line 3998, in _parse_unary return self._parse_at_time_zone(self._parse_type()) File ".../python3.8/site-packages/sqlglot/parser.py", line 4020, in _parse_type this = self._parse_column() File ".../python3.8/site-packages/sqlglot/parser.py", line 4220, in _parse_column this = self._parse_column_reference() File ".../python3.8/site-packages/sqlglot/parser.py", line 4224, in _parse_column_reference this = self._parse_field() File ".../python3.8/site-packages/sqlglot/parser.py", line 4347, in _parse_field field = self._parse_primary() or self._parse_function( File ".../python3.8/site-packages/sqlglot/parser.py", line 4370, in _parse_function func = self._parse_function_call( File ".../python3.8/site-packages/sqlglot/parser.py", line 4458, in _parse_function_call self._match_r_paren(this) File ".../python3.8/site-packages/sqlglot/parser.py", line 6196, in _match_r_paren self.raise_error("Expecting )") File ".../python3.8/site-packages/sqlglot/parser.py", line 1285, in raise_error raise error sqlglot.errors.ParseError: Expecting ). Line 1, Col: 20. SELECT trim(',' FROM some_col); ``` **Official Documentation** https://trino.io/docs/current/functions/string.html?highlight=trim#trim
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_trino.py::TestTrino::test_trim" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
"2024-04-30T21:59:25Z"
mit
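With the TRIM parser/generator hooks from the sqlglot-3385 record in place, both Trino spellings should parse; the expected strings below are the ones asserted by the new tests/dialects/test_trino.py, so this sketch assumes the patched build:

```python
import sqlglot

# Standard-SQL spelling round-trips unchanged.
print(sqlglot.transpile("SELECT TRIM('!' FROM '!foo!')", read="trino")[0])
# -> SELECT TRIM('!' FROM '!foo!')

# Two-argument spelling normalizes to the FROM form.
print(sqlglot.transpile("SELECT TRIM('!foo!', '!')", read="trino")[0])
# -> SELECT TRIM('!' FROM '!foo!')
```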
tobymao__sqlglot-805
diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py index 0ab32188..6be68ace 100644 --- a/sqlglot/dialects/bigquery.py +++ b/sqlglot/dialects/bigquery.py @@ -110,17 +110,17 @@ class BigQuery(Dialect): KEYWORDS = { **tokens.Tokenizer.KEYWORDS, + "BEGIN": TokenType.COMMAND, + "BEGIN TRANSACTION": TokenType.BEGIN, "CURRENT_DATETIME": TokenType.CURRENT_DATETIME, "CURRENT_TIME": TokenType.CURRENT_TIME, "GEOGRAPHY": TokenType.GEOGRAPHY, - "INT64": TokenType.BIGINT, "FLOAT64": TokenType.DOUBLE, + "INT64": TokenType.BIGINT, + "NOT DETERMINISTIC": TokenType.VOLATILE, "QUALIFY": TokenType.QUALIFY, "UNKNOWN": TokenType.NULL, "WINDOW": TokenType.WINDOW, - "NOT DETERMINISTIC": TokenType.VOLATILE, - "BEGIN": TokenType.COMMAND, - "BEGIN TRANSACTION": TokenType.BEGIN, } KEYWORDS.pop("DIV") @@ -131,6 +131,7 @@ class BigQuery(Dialect): "DATE_ADD": _date_add(exp.DateAdd), "DATETIME_ADD": _date_add(exp.DatetimeAdd), "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)), + "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, "TIME_ADD": _date_add(exp.TimeAdd), "TIMESTAMP_ADD": _date_add(exp.TimestampAdd), "DATE_SUB": _date_add(exp.DateSub), @@ -183,6 +184,7 @@ class BigQuery(Dialect): exp.VolatilityProperty: lambda self, e: f"DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC", + exp.RegexpLike: rename_func("REGEXP_CONTAINS"), } TYPE_MAPPING = {
tobymao/sqlglot
3d0216fd102e7dbe585999e9cb961f31cd5bfa53
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py index 5c5a7713..1d60ec65 100644 --- a/tests/dialects/test_bigquery.py +++ b/tests/dialects/test_bigquery.py @@ -6,6 +6,11 @@ class TestBigQuery(Validator): dialect = "bigquery" def test_bigquery(self): + self.validate_all( + "REGEXP_CONTAINS('foo', '.*')", + read={"bigquery": "REGEXP_CONTAINS('foo', '.*')"}, + write={"mysql": "REGEXP_LIKE('foo', '.*')"}, + ), self.validate_all( '"""x"""', write={
Functions transpiled incorrectly from MySQL to BigQuery ```python import sqlglot print(sqlglot.transpile("SELECT 'foo' regexp '.*';", read="mysql", write="bigquery")) ``` This currently transpiles to `SELECT REGEXP_LIKE('foo', '.*')` which is not valid for BigQuery (I guess the correct function would be `REGEXP_CONTAINS`). Overall, how robust should I expect transpilation to be? Are certain language pairs better supported than others? Are functions more problematic than other constructs?
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery" ]
[ "tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2022-12-05T22:59:24Z"
mit
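For the sqlglot-805 record above, a sketch of the transpilation the reporter attempted, assuming the `exp.RegexpLike -> REGEXP_CONTAINS` mapping from the patch is applied; the reverse direction (bigquery read, mysql write) is what the record's test_patch checks:

```python
import sqlglot

# MySQL's REGEXP operator should now land as REGEXP_CONTAINS in BigQuery
# instead of the invalid REGEXP_LIKE.
print(sqlglot.transpile("SELECT 'foo' REGEXP '.*'", read="mysql", write="bigquery"))
# expected: ["SELECT REGEXP_CONTAINS('foo', '.*')"]
```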
tobymao__sqlglot-902
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index ef21f0bb..26d8f945 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -2130,6 +2130,7 @@ class DataType(Expression): JSON = auto() JSONB = auto() INTERVAL = auto() + TIME = auto() TIMESTAMP = auto() TIMESTAMPTZ = auto() TIMESTAMPLTZ = auto() diff --git a/sqlglot/parser.py b/sqlglot/parser.py index 133bf7f8..370b8a4f 100644 --- a/sqlglot/parser.py +++ b/sqlglot/parser.py @@ -112,6 +112,7 @@ class Parser(metaclass=_Parser): TokenType.JSON, TokenType.JSONB, TokenType.INTERVAL, + TokenType.TIME, TokenType.TIMESTAMP, TokenType.TIMESTAMPTZ, TokenType.TIMESTAMPLTZ, @@ -319,6 +320,7 @@ class Parser(metaclass=_Parser): } TIMESTAMPS = { + TokenType.TIME, TokenType.TIMESTAMP, TokenType.TIMESTAMPTZ, TokenType.TIMESTAMPLTZ, @@ -1915,7 +1917,10 @@ class Parser(metaclass=_Parser): ): value = exp.DataType(this=exp.DataType.Type.TIMESTAMPLTZ, expressions=expressions) elif self._match(TokenType.WITHOUT_TIME_ZONE): - value = exp.DataType(this=exp.DataType.Type.TIMESTAMP, expressions=expressions) + if type_token == TokenType.TIME: + value = exp.DataType(this=exp.DataType.Type.TIME, expressions=expressions) + else: + value = exp.DataType(this=exp.DataType.Type.TIMESTAMP, expressions=expressions) maybe_func = maybe_func and value is None diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py index 0efa7d02..8c5f13bd 100644 --- a/sqlglot/tokens.py +++ b/sqlglot/tokens.py @@ -86,6 +86,7 @@ class TokenType(AutoName): VARBINARY = auto() JSON = auto() JSONB = auto() + TIME = auto() TIMESTAMP = auto() TIMESTAMPTZ = auto() TIMESTAMPLTZ = auto() @@ -671,6 +672,7 @@ class Tokenizer(metaclass=_Tokenizer): "BLOB": TokenType.VARBINARY, "BYTEA": TokenType.VARBINARY, "VARBINARY": TokenType.VARBINARY, + "TIME": TokenType.TIME, "TIMESTAMP": TokenType.TIMESTAMP, "TIMESTAMPTZ": TokenType.TIMESTAMPTZ, "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
tobymao/sqlglot
70ebdf63648e94bdb13c68fddd6ee5db31fcda7d
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py index 1e048d5b..583d3496 100644 --- a/tests/dialects/test_postgres.py +++ b/tests/dialects/test_postgres.py @@ -122,6 +122,10 @@ class TestPostgres(Validator): "TO_TIMESTAMP(123::DOUBLE PRECISION)", write={"postgres": "TO_TIMESTAMP(CAST(123 AS DOUBLE PRECISION))"}, ) + self.validate_all( + "SELECT to_timestamp(123)::time without time zone", + write={"postgres": "SELECT CAST(TO_TIMESTAMP(123) AS TIME)"}, + ) self.validate_identity( "CREATE TABLE A (LIKE B INCLUDING CONSTRAINT INCLUDING COMPRESSION EXCLUDING COMMENTS)"
Postgres parse error on cast to time The following valid Postgres query produces a parsing error: ```python import sqlglot sql = """ SELECT to_timestamp(123)::time without time zone """ sqlglot.parse_one(sql, read="postgres") ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_postgres.py::TestPostgres::test_postgres" ]
[ "tests/dialects/test_postgres.py::TestPostgres::test_ddl" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-01-09T17:34:44Z"
mit
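The sqlglot-902 record's repro can be turned into a positive check once the TIME token is recognized; the expected output string is taken from the record's test_patch:

```python
import sqlglot

# This cast used to raise ParseError under the postgres dialect.
print(sqlglot.transpile(
    "SELECT to_timestamp(123)::time without time zone", read="postgres"
))
# expected: ['SELECT CAST(TO_TIMESTAMP(123) AS TIME)']
```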
tobymao__sqlglot-911
diff --git a/sqlglot/dialects/clickhouse.py b/sqlglot/dialects/clickhouse.py index 3ef467b1..04d46d28 100644 --- a/sqlglot/dialects/clickhouse.py +++ b/sqlglot/dialects/clickhouse.py @@ -1,5 +1,7 @@ from __future__ import annotations +import typing as t + from sqlglot import exp, generator, parser, tokens from sqlglot.dialects.dialect import Dialect, inline_array_sql, var_map_sql from sqlglot.parser import parse_var_map @@ -22,6 +24,7 @@ class ClickHouse(Dialect): KEYWORDS = { **tokens.Tokenizer.KEYWORDS, "ASOF": TokenType.ASOF, + "GLOBAL": TokenType.GLOBAL, "DATETIME64": TokenType.DATETIME, "FINAL": TokenType.FINAL, "FLOAT32": TokenType.FLOAT, @@ -42,12 +45,27 @@ class ClickHouse(Dialect): "QUANTILEIF": lambda params, args: exp.QuantileIf(parameters=params, expressions=args), } + RANGE_PARSERS = { + **parser.Parser.RANGE_PARSERS, + TokenType.GLOBAL: lambda self, this: self._match(TokenType.IN) + and self._parse_in(this, is_global=True), + } + JOIN_KINDS = {*parser.Parser.JOIN_KINDS, TokenType.ANY, TokenType.ASOF} # type: ignore TABLE_ALIAS_TOKENS = {*parser.Parser.TABLE_ALIAS_TOKENS} - {TokenType.ANY} # type: ignore - def _parse_table(self, schema=False): - this = super()._parse_table(schema) + def _parse_in( + self, this: t.Optional[exp.Expression], is_global: bool = False + ) -> exp.Expression: + this = super()._parse_in(this) + this.set("is_global", is_global) + return this + + def _parse_table( + self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None + ) -> t.Optional[exp.Expression]: + this = super()._parse_table(schema=schema, alias_tokens=alias_tokens) if self._match(TokenType.FINAL): this = self.expression(exp.Final, this=this) diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index 2167c675..c78387d8 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -2449,6 +2449,7 @@ class In(Predicate): "query": False, "unnest": False, "field": False, + "is_global": False, } @@ -3002,8 +3003,10 @@ class StrToTime(Func): arg_types = {"this": True, "format": True} +# Spark allows unix_timestamp() +# https://spark.apache.org/docs/3.1.3/api/python/reference/api/pyspark.sql.functions.unix_timestamp.html class StrToUnix(Func): - arg_types = {"this": True, "format": True} + arg_types = {"this": False, "format": False} class NumberToStr(Func): diff --git a/sqlglot/generator.py b/sqlglot/generator.py index 21ab41eb..c690ec09 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -1141,6 +1141,8 @@ class Generator: query = expression.args.get("query") unnest = expression.args.get("unnest") field = expression.args.get("field") + is_global = " GLOBAL" if expression.args.get("is_global") else "" + if query: in_sql = self.wrap(query) elif unnest: @@ -1149,7 +1151,8 @@ class Generator: in_sql = self.sql(field) else: in_sql = f"({self.expressions(expression, flat=True)})" - return f"{self.sql(expression, 'this')} IN {in_sql}" + + return f"{self.sql(expression, 'this')}{is_global} IN {in_sql}" def in_unnest_op(self, unnest: exp.Unnest) -> str: return f"(SELECT {self.sql(unnest)})" diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py index e211ff78..32989920 100644 --- a/sqlglot/tokens.py +++ b/sqlglot/tokens.py @@ -182,6 +182,7 @@ class TokenType(AutoName): FUNCTION = auto() FROM = auto() GENERATED = auto() + GLOBAL = auto() GROUP_BY = auto() GROUPING_SETS = auto() HAVING = auto()
tobymao/sqlglot
55a21cdb4fdc235433a0cedf93a907f5a70e6d23
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py index 6801e6f8..109e9f38 100644 --- a/tests/dialects/test_clickhouse.py +++ b/tests/dialects/test_clickhouse.py @@ -16,6 +16,7 @@ class TestClickhouse(Validator): self.validate_identity("SELECT * FROM foo ANY JOIN bla") self.validate_identity("SELECT quantile(0.5)(a)") self.validate_identity("SELECT quantiles(0.5)(a) AS x FROM t") + self.validate_identity("SELECT * FROM foo WHERE x GLOBAL IN (SELECT * FROM bar)") self.validate_all( "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname ASC NULLS LAST, lname", diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py index 7395e727..f287a89d 100644 --- a/tests/dialects/test_spark.py +++ b/tests/dialects/test_spark.py @@ -207,6 +207,7 @@ TBLPROPERTIES ( ) def test_spark(self): + self.validate_identity("SELECT UNIX_TIMESTAMP()") self.validate_all( "ARRAY_SORT(x, (left, right) -> -1)", write={
Support "GLOBAL IN" in sql query while using transpile function I'm using CLICKHOUSE dialect ```python from sqlglot import transpile, Dialects sql = """ SELECT field FROM table WHERE (SELECT count(*) FROM q) > 0 AND scheme.field GLOBAL IN (SELECT DISTINCT field FROM q) """ sql = transpile(sql, read=Dialects.CLICKHOUSE, identify=True, pretty=True)[0] ``` Here is the ERROR that I get: ``` sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 5, Col: 22. SELECT field FROM table where (select count(*) from q) > 0 and scheme.field gΜ²lΜ²oΜ²bΜ²aΜ²lΜ² in (select distinct field from q)) ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse", "tests/dialects/test_spark.py::TestSpark::test_spark" ]
[ "tests/dialects/test_spark.py::TestSpark::test_ddl", "tests/dialects/test_spark.py::TestSpark::test_hint", "tests/dialects/test_spark.py::TestSpark::test_iif", "tests/dialects/test_spark.py::TestSpark::test_to_date" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-01-11T13:11:31Z"
mit
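With the sqlglot-911 patch above, `GLOBAL IN` survives a ClickHouse round trip; this identity is the one added to tests/dialects/test_clickhouse.py, and the sketch assumes the patched build:

```python
import sqlglot

# GLOBAL IN now parses and regenerates unchanged under the clickhouse dialect.
sql = "SELECT * FROM foo WHERE x GLOBAL IN (SELECT * FROM bar)"
print(sqlglot.transpile(sql, read="clickhouse")[0])  # expected: unchanged
```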
tomMoral__loky-202
diff --git a/loky/backend/context.py b/loky/backend/context.py index 0f744c5..dfeb4ed 100644 --- a/loky/backend/context.py +++ b/loky/backend/context.py @@ -211,8 +211,8 @@ class LokyContext(BaseContext): """ def Semaphore(self, value=1): """Returns a semaphore object""" - from . import synchronize - return synchronize.Semaphore(value=value) + from .synchronize import Semaphore + return Semaphore(value=value) def BoundedSemaphore(self, value): """Returns a bounded semaphore object""" diff --git a/loky/backend/reduction.py b/loky/backend/reduction.py index 0d40c5e..0bad5f6 100644 --- a/loky/backend/reduction.py +++ b/loky/backend/reduction.py @@ -188,7 +188,11 @@ def set_loky_pickler(loky_pickler=None): if sys.version_info < (3,): self.dispatch = self._dispatch.copy() else: - self.dispatch_table = self._dispatch_table.copy() + if getattr(self, "dispatch_table", None) is not None: + self.dispatch_table.update(self._dispatch_table.copy()) + else: + self.dispatch_table = self._dispatch_table.copy() + for type, reduce_func in reducers.items(): self.register(type, reduce_func) diff --git a/loky/backend/semaphore_tracker.py b/loky/backend/semaphore_tracker.py index 7d3f23e..c83b8c6 100644 --- a/loky/backend/semaphore_tracker.py +++ b/loky/backend/semaphore_tracker.py @@ -37,10 +37,13 @@ except ImportError: from .semlock import sem_unlink if sys.version_info < (3,): - BrokenPipeError = IOError + BrokenPipeError = OSError __all__ = ['ensure_running', 'register', 'unregister'] +_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask') +_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) + VERBOSE = False @@ -68,6 +71,13 @@ class SemaphoreTracker(object): return # => dead, launch it again os.close(self._fd) + try: + # Clean-up to avoid dangling processes. + os.waitpid(self._pid, 0) + except OSError: + # The process was terminated or is a child from an ancestor + # of the current process. + pass self._fd = None self._pid = None @@ -80,8 +90,9 @@ class SemaphoreTracker(object): except Exception: pass - cmd = 'from {} import main; main(%d)'.format(main.__module__) r, w = os.pipe() + cmd = 'from {} import main; main({}, {})'.format( + main.__module__, r, VERBOSE) try: fds_to_pass.append(r) # process will out live us, so no need to wait on pid @@ -94,9 +105,23 @@ class SemaphoreTracker(object): import re for i in range(1, len(args)): args[i] = re.sub("-R+", "-R", args[i]) - args += ['-c', cmd % r] + args += ['-c', cmd] util.debug("launching Semaphore tracker: {}".format(args)) - pid = spawnv_passfds(exe, args, fds_to_pass) + # bpo-33613: Register a signal mask that will block the + # signals. This signal mask will be inherited by the child + # that is going to be spawned and will protect the child from a + # race condition that can make the child die before it + # registers signal handlers for SIGINT and SIGTERM. The mask is + # unregistered after spawning the child. + try: + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_BLOCK, + _IGNORED_SIGNALS) + pid = spawnv_passfds(exe, args, fds_to_pass) + finally: + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_UNBLOCK, + _IGNORED_SIGNALS) except BaseException: os.close(w) raise @@ -142,19 +167,22 @@ unregister = _semaphore_tracker.unregister getfd = _semaphore_tracker.getfd -def main(fd): +def main(fd, verbose=0): '''Run semaphore tracker.''' # protect the process from ^C and "killall python" etc signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) + for f in (sys.stdin, sys.stdout): try: f.close() except Exception: pass - if VERBOSE: # pragma: no cover + if verbose: # pragma: no cover sys.stderr.write("Main semaphore tracker is running\n") sys.stderr.flush() @@ -168,14 +196,14 @@ def main(fd): if cmd == b'REGISTER': name = name.decode('ascii') cache.add(name) - if VERBOSE: # pragma: no cover + if verbose: # pragma: no cover sys.stderr.write("[SemaphoreTracker] register {}\n" .format(name)) sys.stderr.flush() elif cmd == b'UNREGISTER': name = name.decode('ascii') cache.remove(name) - if VERBOSE: # pragma: no cover + if verbose: # pragma: no cover sys.stderr.write("[SemaphoreTracker] unregister {}" ": cache({})\n" .format(name, len(cache))) @@ -205,16 +233,16 @@ def main(fd): try: try: sem_unlink(name) - if VERBOSE: # pragma: no cover + if verbose: # pragma: no cover sys.stderr.write("[SemaphoreTracker] unlink {}\n" .format(name)) sys.stderr.flush() except Exception as e: - warnings.warn('semaphore_tracker: %r: %r' % (name, e)) + warnings.warn('semaphore_tracker: %s: %r' % (name, e)) finally: pass - if VERBOSE: # pragma: no cover + if verbose: # pragma: no cover sys.stderr.write("semaphore tracker shut down\n") sys.stderr.flush() diff --git a/loky/backend/spawn.py b/loky/backend/spawn.py index fb375e5..c0390e4 100644 --- a/loky/backend/spawn.py +++ b/loky/backend/spawn.py @@ -151,7 +151,7 @@ def prepare(data): if 'orig_dir' in data: process.ORIGINAL_DIR = data['orig_dir'] - if 'tacker_pid' in data: + if 'tracker_pid' in data: from . import semaphore_tracker semaphore_tracker._semaphore_tracker._pid = data["tracker_pid"]
tomMoral/loky
1f5f77eb18735831a80c9936be52c831d12bcaa5
diff --git a/tests/test_semaphore_tracker.py b/tests/test_semaphore_tracker.py new file mode 100644 index 0000000..9e04a1d --- /dev/null +++ b/tests/test_semaphore_tracker.py @@ -0,0 +1,199 @@ +"""Tests for the SemaphoreTracker class""" +import errno +import gc +import io +import os +import pytest +import re +import signal +import sys +import time +import warnings +import weakref + +from loky import ProcessPoolExecutor +import loky.backend.semaphore_tracker as semaphore_tracker +from loky.backend.semlock import sem_unlink +from loky.backend.context import get_context + + +def get_sem_tracker_pid(): + semaphore_tracker.ensure_running() + return semaphore_tracker._semaphore_tracker._pid + + [email protected](sys.platform == "win32", + reason="no semaphore_tracker on windows") +class TestSemaphoreTracker: + def test_child_retrieves_semaphore_tracker(self): + parent_sem_tracker_pid = get_sem_tracker_pid() + executor = ProcessPoolExecutor(max_workers=1) + + # Register a semaphore in the parent process, and un-register it in the + # child process. If the two processes do not share the same + # semaphore_tracker, a cache KeyError should be printed in stderr. + import subprocess + semlock_name = 'loky-mysemaphore' + cmd = '''if 1: + import os, sys + + from loky import ProcessPoolExecutor + from loky.backend import semaphore_tracker + from loky.backend.semlock import SemLock + + semaphore_tracker.VERBOSE=True + semlock_name = "{}" + + # We don't need to create the semaphore as registering / unregistering + # operations simply add / remove entries from a cache, but do not + # manipulate the actual semaphores. + semaphore_tracker.register(semlock_name) + + def unregister(name): + # semaphore_tracker.unregister is actually a bound method of the + # SemaphoreTracker. We need a custom wrapper to avoid object + # serialization. + from loky.backend import semaphore_tracker + semaphore_tracker.unregister(semlock_name) + + e = ProcessPoolExecutor(1) + e.submit(unregister, semlock_name).result() + e.shutdown() + ''' + try: + child_sem_tracker_pid = executor.submit( + get_sem_tracker_pid).result() + + # First simple pid retrieval check (see #200) + assert child_sem_tracker_pid == parent_sem_tracker_pid + + p = subprocess.Popen( + [sys.executable, '-E', '-c', cmd.format(semlock_name)], + stderr=subprocess.PIPE) + p.wait() + + err = p.stderr.read().decode('utf-8') + p.stderr.close() + + assert re.search("unregister %s" % semlock_name, err) is not None + assert re.search("KeyError: '%s'" % semlock_name, err) is None + + finally: + executor.shutdown() + + + # The following four tests are inspired from cpython _test_multiprocessing + def test_semaphore_tracker(self): + # + # Check that killing process does not leak named semaphores + # + import subprocess + cmd = '''if 1: + import time, os + from loky.backend.synchronize import Lock + + # close manually the read end of the pipe in the child process + # because pass_fds does not exist for python < 3.2 + os.close(%d) + + lock1 = Lock() + lock2 = Lock() + os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n") + os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n") + time.sleep(10) + ''' + r, w = os.pipe() + + if sys.version_info[:2] >= (3, 2): + fd_kws = {'pass_fds': [w, r]} + else: + fd_kws = {'close_fds': False} + p = subprocess.Popen([sys.executable, + '-E', '-c', cmd % (r, w, w)], + stderr=subprocess.PIPE, + **fd_kws) + os.close(w) + with io.open(r, 'rb', closefd=True) as f: + name1 = f.readline().rstrip().decode('ascii') + name2 = f.readline().rstrip().decode('ascii') + + # subprocess holding a reference to lock1 is still alive, so this call + # should succeed + sem_unlink(name1) + p.terminate() + p.wait() + time.sleep(2.0) + with pytest.raises(OSError) as ctx: + sem_unlink(name2) + # docs say it should be ENOENT, but OSX seems to give EINVAL + assert ctx.value.errno in (errno.ENOENT, errno.EINVAL) + err = p.stderr.read().decode('utf-8') + p.stderr.close() + expected = ('semaphore_tracker: There appear to be 2 leaked ' + 'semaphores') + assert re.search(expected, err) is not None + + # lock1 is still registered, but was destroyed externally: the tracker + # is expected to complain. + expected = ("semaphore_tracker: %s: (OSError\\(%d|" + "FileNotFoundError)" % (name1, errno.ENOENT)) + assert re.search(expected, err) is not None + + def check_semaphore_tracker_death(self, signum, should_die): + # bpo-31310: if the semaphore tracker process has died, it should + # be restarted implicitly. + from loky.backend.semaphore_tracker import _semaphore_tracker + pid = _semaphore_tracker._pid + if pid is not None: + os.kill(pid, signal.SIGKILL) + os.waitpid(pid, 0) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + _semaphore_tracker.ensure_running() + # in python < 3.3 , the race condition described in bpo-33613 still + # exists, as this fixe requires signal.pthread_sigmask + time.sleep(1.0) + pid = _semaphore_tracker._pid + + os.kill(pid, signum) + time.sleep(1.0) # give it time to die + + ctx = get_context("loky") + with warnings.catch_warnings(record=True) as all_warn: + warnings.simplefilter("always") + + # remove unrelated MacOS warning messages first + warnings.filterwarnings( + "ignore", message='semaphore are broken on OSX') + + sem = ctx.Semaphore() + sem.acquire() + sem.release() + wr = weakref.ref(sem) + # ensure `sem` gets collected, which triggers communication with + # the semaphore tracker + del sem + gc.collect() + assert wr() is None + if should_die: + assert len(all_warn) == 1 + the_warn = all_warn[0] + assert issubclass(the_warn.category, UserWarning) + assert "semaphore_tracker: process died" in str( + the_warn.message) + else: + assert len(all_warn) == 0 + + def test_semaphore_tracker_sigint(self): + # Catchable signal (ignored by semaphore tracker) + self.check_semaphore_tracker_death(signal.SIGINT, False) + + def test_semaphore_tracker_sigterm(self): + # Catchable signal (ignored by semaphore tracker) + self.check_semaphore_tracker_death(signal.SIGTERM, False) + + @pytest.mark.skipif(sys.version_info[0] < 3, + reason="warnings.catch_warnings limitation") + def test_semaphore_tracker_sigkill(self): + # Uncatchable signal. + self.check_semaphore_tracker_death(signal.SIGKILL, True)
_pid attribute of SemaphoreTracker is not passed to child Since #106, workers created by `loky` inherit their parent's semaphore tracker. However, it seems that the `_pid` attribute gets lost. This test here fails. I'll add a patch soon. ```python from loky import ProcessPoolExecutor import loky.backend.semaphore_tracker def get_semaphore_pid(): sem_tracker = loky.backend.semaphore_tracker._semaphore_tracker sem_tracker.ensure_running() return sem_tracker._pid def test_child_uses_parent_semaphore_tracker(): parent_semtracker_pid = get_semaphore_pid() executor = ProcessPoolExecutor(max_workers=2) child_semtracker_pid = executor.submit(get_semaphore_pid).result() assert parent_semtracker_pid == child_semtracker_pid if __name__ == "__main__": test_child_uses_parent_semaphore_tracker() ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_semaphore_tracker.py::TestSemaphoreTracker::test_semaphore_tracker" ]
[ "tests/test_semaphore_tracker.py::TestSemaphoreTracker::test_semaphore_tracker_sigint", "tests/test_semaphore_tracker.py::TestSemaphoreTracker::test_semaphore_tracker_sigterm", "tests/test_semaphore_tracker.py::TestSemaphoreTracker::test_semaphore_tracker_sigkill" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-03-12T13:45:15Z"
bsd-3-clause
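One easily missed piece of the loky-202 patch above is the reduction.py guard: if a pickler instance already carries a `dispatch_table`, overwriting it would silently discard those reducers, so the patch merges instead. A standalone sketch of that merge logic; `CustomPickler` and its tables are hypothetical stand-ins, not loky API:

```python
class CustomPickler:  # hypothetical stand-in for loky's pickler subclass
    _dispatch_table = {}  # class-level reducers, as in the patch

    def _install_reducers(self):
        # Merge rather than overwrite, so a dispatch_table installed
        # elsewhere (e.g. by a base class) keeps its entries.
        if getattr(self, "dispatch_table", None) is not None:
            self.dispatch_table.update(self._dispatch_table.copy())
        else:
            self.dispatch_table = self._dispatch_table.copy()
```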
tomerfiliba__plumbum-366
diff --git a/plumbum/cli/image.py b/plumbum/cli/image.py index b207fa3..502af80 100644 --- a/plumbum/cli/image.py +++ b/plumbum/cli/image.py @@ -2,7 +2,7 @@ from __future__ import print_function, division from plumbum import colors from .termsize import get_terminal_size -import . as cli +from .. import cli import sys class Image(object):
tomerfiliba/plumbum
16508155d6361be7a815d264c89e2b893120266b
diff --git a/tests/test_color.py b/tests/test_color.py index 5f4cd3f..2b8105d 100644 --- a/tests/test_color.py +++ b/tests/test_color.py @@ -2,6 +2,8 @@ import pytest from plumbum.colorlib.styles import ANSIStyle, Color, AttributeNotFound, ColorNotFound from plumbum.colorlib.names import color_html, FindNearest +# Just check to see if this file is importable +from plumbum.cli.image import Image class TestNearestColor: def test_exact(self):
SyntaxError in plumbum/cli/image.py image.py contains the line `import . as cli`. All versions of Python I tried (3.6, 3.5, 2.7) raise a SyntaxError when reading this line; according to the Python Language Reference, this form of the import statement is not allowed. What exactly is the purpose of this line, and how can the same purpose be achieved in legal Python?
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_color.py::TestNearestColor::test_allcolors", "tests/test_color.py::TestColorLoad::test_rgb", "tests/test_color.py::TestColorLoad::test_simple_name", "tests/test_color.py::TestColorLoad::test_different_names", "tests/test_color.py::TestColorLoad::test_loading_methods", "tests/test_color.py::TestANSIColor::test_ansi" ]
[]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2017-12-22T22:08:12Z"
mit
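For reference alongside the plumbum-366 record above, the grammar-legal spellings of what `import . as cli` was presumably trying to do; the relative form is the one the patch adopts, and it only works from inside the package:

```python
# Inside plumbum/cli/image.py (a module of the plumbum.cli package):
from .. import cli  # the spelling the patch uses

# Equivalent absolute import, usable from anywhere:
import plumbum.cli as cli
```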
tomerfiliba__plumbum-372
diff --git a/build.py b/build.py index 1cfd980..cacb752 100755 --- a/build.py +++ b/build.py @@ -25,7 +25,7 @@ class BuildProject(cli.Application): if twine is None: print("Twine not installed, cannot securely upload. Install twine.") else: - twine['upload','dist/*gztar' 'dist/*.exe' '*.whl'] & FG + twine['upload', 'dist/*tar.gz', 'dist/*.exe', 'dist/*.whl'] & FG if __name__ == "__main__": diff --git a/dev-requirements.txt b/dev-requirements.txt index db117b1..ebc89d8 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,5 +1,6 @@ pytest pytest-cov +pytest-mock pycparser<2.18 ; python_version < '2.7' paramiko<2.4 ; python_version < '2.7' paramiko ; python_version >= '2.7' diff --git a/plumbum/cli/application.py b/plumbum/cli/application.py index 75bad0e..5434de1 100644 --- a/plumbum/cli/application.py +++ b/plumbum/cli/application.py @@ -406,15 +406,13 @@ class Application(object): ngettext( "Expected at least {0} positional argument, got {1}", "Expected at least {0} positional arguments, got {1}", - min_args).format( - min_args, tailargs)) + min_args).format(min_args, tailargs)) elif len(tailargs) > max_args: raise PositionalArgumentsError( ngettext( "Expected at most {0} positional argument, got {1}", "Expected at most {0} positional arguments, got {1}", - max_args).format( - max_args, tailargs)) + max_args).format(max_args, tailargs)) # Positional arguement validataion if hasattr(self.main, 'positional'): diff --git a/plumbum/cli/i18n.py b/plumbum/cli/i18n.py index b85db2c..45ef8b9 100644 --- a/plumbum/cli/i18n.py +++ b/plumbum/cli/i18n.py @@ -8,9 +8,9 @@ if loc is None or loc.startswith('en'): return str def ngettext(self, str1, strN, n): if n==1: - return str1.format(n) + return str1.replace("{0}", str(n)) else: - return strN.format(n) + return strN.replace("{0}", str(n)) def get_translation_for(package_name): return NullTranslation() diff --git a/plumbum/machines/ssh_machine.py b/plumbum/machines/ssh_machine.py index e963745..68819a2 100644 --- a/plumbum/machines/ssh_machine.py +++ b/plumbum/machines/ssh_machine.py @@ -276,6 +276,7 @@ class PuttyMachine(SshMachine): user = local.env.user if port is not None: ssh_opts.extend(["-P", str(port)]) + scp_opts = list(scp_opts) + ["-P", str(port)] port = None SshMachine.__init__(self, host, user, port, keyfile = keyfile, ssh_command = ssh_command, scp_command = scp_command, ssh_opts = ssh_opts, scp_opts = scp_opts, encoding = encoding, @@ -292,5 +293,3 @@ class PuttyMachine(SshMachine): def session(self, isatty = False, new_session = False): return ShellSession(self.popen((), (["-t"] if isatty else ["-T"]), new_session = new_session), self.custom_encoding, isatty, self.connect_timeout) - -
tomerfiliba/plumbum
4ea9e4c00a8d36900071393954d2df90931ad689
diff --git a/tests/test_cli.py b/tests/test_cli.py index 95bc7c6..71298de 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -25,7 +25,9 @@ class SimpleApp(cli.Application): self.eggs = old self.tailargs = args - +class PositionalApp(cli.Application): + def main(self, one): + print("Got", one) class Geet(cli.Application): debug = cli.Flag("--debug") @@ -123,6 +125,24 @@ class TestCLI: _, rc = SimpleApp.run(["foo", "--bacon=hello"], exit = False) assert rc == 2 + + # Testing #371 + def test_extra_args(self, capsys): + + _, rc = PositionalApp.run(["positionalapp"], exit = False) + assert rc != 0 + stdout, stderr = capsys.readouterr() + assert "Expected at least" in stdout + + _, rc = PositionalApp.run(["positionalapp", "one"], exit = False) + assert rc == 0 + stdout, stderr = capsys.readouterr() + + _, rc = PositionalApp.run(["positionalapp", "one", "two"], exit = False) + assert rc != 0 + stdout, stderr = capsys.readouterr() + assert "Expected at most" in stdout + def test_subcommands(self): _, rc = Geet.run(["geet", "--debug"], exit = False) assert rc == 0 diff --git a/tests/test_putty.py b/tests/test_putty.py new file mode 100644 index 0000000..763407f --- /dev/null +++ b/tests/test_putty.py @@ -0,0 +1,69 @@ +"""Test that PuttyMachine initializes its SshMachine correctly""" + +import pytest +from plumbum import PuttyMachine, SshMachine + +from plumbum._testtools import xfail_on_pypy + + [email protected](params=['default', '322']) +def ssh_port(request): + return request.param + + +class TestPuttyMachine: + @xfail_on_pypy + def test_putty_command(self, mocker, ssh_port): + local = mocker.patch('plumbum.machines.ssh_machine.local') + init = mocker.spy(SshMachine, '__init__') + mocker.patch('plumbum.machines.ssh_machine.BaseRemoteMachine') + + host = mocker.MagicMock() + user = local.env.user + port = keyfile = None + ssh_command = local["plink"] + scp_command = local["pscp"] + ssh_opts = ["-ssh"] + if ssh_port == 'default': + putty_port = None + scp_opts = () + else: + putty_port = int(ssh_port) + ssh_opts.extend(['-P', ssh_port]) + scp_opts = ['-P', ssh_port] + encoding = mocker.MagicMock() + connect_timeout = 20 + new_session = True + + PuttyMachine( + host, + port=putty_port, + connect_timeout=connect_timeout, + new_session=new_session, + encoding=encoding, + ) + + init.assert_called_with( + mocker.ANY, + host, + user, + port, + keyfile=keyfile, + ssh_command=ssh_command, + scp_command=scp_command, + ssh_opts=ssh_opts, + scp_opts=scp_opts, + encoding=encoding, + connect_timeout=connect_timeout, + new_session=new_session, + ) + + def test_putty_str(self, mocker): + local = mocker.patch('plumbum.machines.ssh_machine.local') + mocker.patch('plumbum.machines.ssh_machine.BaseRemoteMachine') + + host = mocker.MagicMock() + user = local.env.user + + machine = PuttyMachine(host) + assert str(machine) == 'putty-ssh://{0}@{1}'.format(user, host)
"IndexError: tuple index out of range" on missing arguments ![image](https://user-images.githubusercontent.com/19155205/34913485-79a6fe3a-f8c4-11e7-8771-2f6066a9bba6.png) When trying to run a subcommand without the correct amount of arguments I get a python exception. Using conda and python 3.6 plumbum version: - plumbum=1.6.5=py36_0
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_cli.py::TestCLI::test_extra_args", "tests/test_putty.py::TestPuttyMachine::test_putty_command[322]" ]
[ "tests/test_cli.py::TestInheritedApp::test_help", "tests/test_cli.py::TestCLI::test_meta_switches", "tests/test_cli.py::TestCLI::test_okay", "tests/test_cli.py::TestCLI::test_failures", "tests/test_cli.py::TestCLI::test_subcommands", "tests/test_cli.py::TestCLI::test_unbind", "tests/test_cli.py::TestCLI::test_default_main", "tests/test_cli.py::TestCLI::test_lazy_subcommand", "tests/test_cli.py::TestCLI::test_reset_switchattr", "tests/test_cli.py::TestCLI::test_invoke", "tests/test_cli.py::TestCLI::test_env_var", "tests/test_cli.py::TestCLI::test_mandatory_env_var", "tests/test_putty.py::TestPuttyMachine::test_putty_command[default]", "tests/test_putty.py::TestPuttyMachine::test_putty_str" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_media", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2018-01-24T09:29:04Z"
mit
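The root cause behind the plumbum-372 traceback above can be reproduced in isolation: the i18n fallback called `str.format()` with a single argument on a message containing both `{0}` and `{1}`. A sketch; the exact IndexError message varies across Python versions:

```python
msg = "Expected at least {0} positional argument, got {1}"

try:
    msg.format(1)  # only {0} supplied; {1} has no value -> IndexError
except IndexError as exc:
    print(exc)  # e.g. "tuple index out of range"

# The patch substitutes only the count, leaving {1} for the later format call:
print(msg.replace("{0}", str(1)))
# -> Expected at least 1 positional argument, got {1}
```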
tomerfiliba__plumbum-605
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0da79ea..03c4749 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,7 +27,7 @@ jobs: with: fetch-depth: 0 - uses: actions/setup-python@v2 - - uses: pre-commit/[email protected] + - uses: pre-commit/[email protected] - name: pylint run: | echo "::add-matcher::$GITHUB_WORKSPACE/.github/matchers/pylint.json" diff --git a/noxfile.py b/noxfile.py index 9b68bfe..9fae464 100644 --- a/noxfile.py +++ b/noxfile.py @@ -22,7 +22,7 @@ def pylint(session): Run pylint. """ - session.install(".", "paramiko", "ipython", "pylint") + session.install(".", "paramiko", "ipython", "pylint~=2.14.3") session.run("pylint", "plumbum", *session.posargs) diff --git a/plumbum/cli/progress.py b/plumbum/cli/progress.py index 3d21a8a..fce6b3c 100644 --- a/plumbum/cli/progress.py +++ b/plumbum/cli/progress.py @@ -69,7 +69,7 @@ class ProgressBase(ABC): return rval def next(self): - return self.__next__() + return next(self) @property def value(self): diff --git a/plumbum/commands/base.py b/plumbum/commands/base.py index 51e667d..eafbde3 100644 --- a/plumbum/commands/base.py +++ b/plumbum/commands/base.py @@ -547,7 +547,7 @@ class StdinDataRedirection(BaseCommand): return self.cmd.machine def popen(self, args=(), **kwargs): - if "stdin" in kwargs and kwargs["stdin"] != PIPE: + if kwargs.get("stdin") not in (PIPE, None): raise RedirectionError("stdin is already redirected") data = self.data if isinstance(data, str) and self._get_encoding() is not None: @@ -558,8 +558,9 @@ class StdinDataRedirection(BaseCommand): f.write(chunk) data = data[self.CHUNK_SIZE :] f.seek(0) + kwargs["stdin"] = f # try: - return self.cmd.popen(args, stdin=f, **kwargs) + return self.cmd.popen(args, **kwargs) # finally: # f.close() diff --git a/plumbum/fs/atomic.py b/plumbum/fs/atomic.py index 5aebc80..d796b2f 100644 --- a/plumbum/fs/atomic.py +++ b/plumbum/fs/atomic.py @@ -290,7 +290,7 @@ class PidFile: return self._ctx = self.atomicfile.locked(blocking=False) try: - self._ctx.__enter__() + self._ctx.__enter__() # pylint: disable=unnecessary-dunder-call except OSError: self._ctx = None try: diff --git a/pyproject.toml b/pyproject.toml index b34fa89..27ddb99 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -68,6 +68,7 @@ ignore = [ [tool.pylint] master.py-version = "3.6" master.jobs = "0" +master.load-plugins = ["pylint.extensions.no_self_use"] reports.output-format = "colorized" similarities.ignore-imports = "yes" messages_control.enable = [ @@ -103,4 +104,5 @@ messages_control.disable = [ "too-many-return-statements", "too-many-statements", "unidiomatic-typecheck", # TODO: might be able to remove + "unnecessary-lambda-assignment", # TODO: 4 instances ]
tomerfiliba/plumbum
5fa1452846aae4a1fd2904bf145b0801c2050508
diff --git a/tests/test_local.py b/tests/test_local.py index dc76a7b..d96d39b 100644 --- a/tests/test_local.py +++ b/tests/test_local.py @@ -639,6 +639,25 @@ class TestLocalMachine: assert result[1] == EXPECT assert EXPECT == capfd.readouterr()[0] + @skip_on_windows + @pytest.mark.parametrize( + "modifier, expected", + [ + (FG, None), + (TF(FG=True), True), + (RETCODE(FG=True), 0), + (TEE, (0, "meow", "")), + ], + ) + def test_redirection_stdin_modifiers_fg(self, modifier, expected, capfd): + "StdinDataRedirection compatible with modifiers which write to stdout" + from plumbum.cmd import cat + + cmd = cat << "meow" + + assert cmd & modifier == expected + assert capfd.readouterr() == ("meow", "") + @skip_on_windows def test_logger_pipe(self): from plumbum.cmd import bash
StdinDataRedirection commands fail upon modification (TEE, FG, etc.) Commands to which standard input has been supplied from a Python string – which are `StdinDataRedirection` commands – erroneously throw a `RedirectionError` when combined with built-in modifiers – stdin is already redirected – even though `stdin` is only being redirected the one time. --- ```python >>> from plumbum import TEE, FG, local # this works >>> local['ls']['-d', '/'] & FG / >>> local['ls']['-d', '/'] & TEE / (0, '/\n', '') # this does not work >>> (local['cat']['-'] << 'this breaks') & FG Traceback (most recent call last): … raise RedirectionError("stdin is already redirected") plumbum.commands.base.RedirectionError: stdin is already redirected ``` --- Affected modifiers invoke `cmd.run(stdin=None, …)`. It appears sufficient to change these to simply **omit the keyword** pair `stdin=None`. However, it's unclear whether this would have unwanted side effects; rather, it seems that `StdinDataRedirection` is in the wrong for throwing an exception over the value `None`. `BaseRedirection.popen` permits either `PIPE` or `None`: ```python if self.KWARG in kwargs and kwargs[self.KWARG] not in (PIPE, None): raise RedirectionError(…) ``` `StdinDataRedirection.popen` only permits `PIPE`: ```python if "stdin" in kwargs and kwargs["stdin"] != PIPE: raise RedirectionError(…) ``` → As such, the resolution is perhaps that the above check in `StdinDataRedirection.popen` be made to mirror `BaseRedirection.popen`.
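A runnable sketch of the suggested resolution, with the check pulled out into a hypothetical helper (`check_stdin_not_redirected` and the local `RedirectionError` stand-in are illustrative; the real fix lives in `StdinDataRedirection.popen`, as the patch above shows):

```python
from subprocess import PIPE


class RedirectionError(Exception):
    """Stand-in for plumbum.commands.base.RedirectionError."""


def check_stdin_not_redirected(kwargs):
    # Accept both PIPE and None, exactly like BaseRedirection.popen does,
    # instead of rejecting every value other than PIPE.
    if kwargs.get("stdin") not in (PIPE, None):
        raise RedirectionError("stdin is already redirected")


check_stdin_not_redirected({})               # ok: stdin not given
check_stdin_not_redirected({"stdin": None})  # ok: what FG/TEE/RETCODE pass
check_stdin_not_redirected({"stdin": PIPE})  # ok
```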
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_local.py::TestLocalMachine::test_redirection_stdin_modifiers_fg[modifier0-None]", "tests/test_local.py::TestLocalMachine::test_redirection_stdin_modifiers_fg[modifier1-True]", "tests/test_local.py::TestLocalMachine::test_redirection_stdin_modifiers_fg[modifier2-0]", "tests/test_local.py::TestLocalMachine::test_redirection_stdin_modifiers_fg[modifier3-expected3]" ]
[ "tests/test_local.py::TestLocalPopen::test_contextmanager", "tests/test_local.py::TestLocalPath::test_name", "tests/test_local.py::TestLocalPath::test_dirname", "tests/test_local.py::TestLocalPath::test_uri", "tests/test_local.py::TestLocalPath::test_pickle", "tests/test_local.py::TestLocalPath::test_empty", "tests/test_local.py::TestLocalPath::test_chown", "tests/test_local.py::TestLocalPath::test_split", "tests/test_local.py::TestLocalPath::test_suffix", "tests/test_local.py::TestLocalPath::test_newname", "tests/test_local.py::TestLocalPath::test_relative_to", "tests/test_local.py::TestLocalPath::test_read_write", "tests/test_local.py::TestLocalPath::test_parts", "tests/test_local.py::TestLocalPath::test_iterdir", "tests/test_local.py::TestLocalPath::test_stem", "tests/test_local.py::TestLocalPath::test_root_drive", "tests/test_local.py::TestLocalPath::test_compare_pathlib", "tests/test_local.py::TestLocalPath::test_suffix_expected", "tests/test_local.py::TestLocalPath::test_touch", "tests/test_local.py::TestLocalPath::test_copy_override", "tests/test_local.py::TestLocalPath::test_copy_nonexistant_dir", "tests/test_local.py::TestLocalPath::test_unlink", "tests/test_local.py::TestLocalPath::test_unhashable", "tests/test_local.py::TestLocalPath::test_getpath", "tests/test_local.py::TestLocalPath::test_path_dir", "tests/test_local.py::TestLocalPath::test_mkdir", "tests/test_local.py::TestLocalPath::test_mkdir_mode", "tests/test_local.py::TestLocalPath::test_str_getitem", "tests/test_local.py::TestLocalPath::test_fspath", "tests/test_local.py::TestLocalMachine::test_getattr", "tests/test_local.py::TestLocalMachine::test_imports", "tests/test_local.py::TestLocalMachine::test_get", "tests/test_local.py::TestLocalMachine::test_shadowed_by_dir", "tests/test_local.py::TestLocalMachine::test_repr_command", "tests/test_local.py::TestLocalMachine::test_cwd", "tests/test_local.py::TestLocalMachine::test_mixing_chdir", "tests/test_local.py::TestLocalMachine::test_contains", "tests/test_local.py::TestLocalMachine::test_path", "tests/test_local.py::TestLocalMachine::test_glob_spaces", "tests/test_local.py::TestLocalMachine::test_env", "tests/test_local.py::TestLocalMachine::test_local", "tests/test_local.py::TestLocalMachine::test_piping", "tests/test_local.py::TestLocalMachine::test_redirection", "tests/test_local.py::TestLocalMachine::test_popen", "tests/test_local.py::TestLocalMachine::test_run", "tests/test_local.py::TestLocalMachine::test_timeout", "tests/test_local.py::TestLocalMachine::test_pipe_stderr", "tests/test_local.py::TestLocalMachine::test_fair_error_attribution", "tests/test_local.py::TestLocalMachine::test_iter_lines_timeout", "tests/test_local.py::TestLocalMachine::test_iter_lines_buffer_size", "tests/test_local.py::TestLocalMachine::test_iter_lines_timeout_by_type", "tests/test_local.py::TestLocalMachine::test_iter_lines_error", "tests/test_local.py::TestLocalMachine::test_iter_lines_line_timeout", "tests/test_local.py::TestLocalMachine::test_modifiers", "tests/test_local.py::TestLocalMachine::test_tee_modifier", "tests/test_local.py::TestLocalMachine::test_tee_race", "tests/test_local.py::TestLocalMachine::test_logger_pipe", "tests/test_local.py::TestLocalMachine::test_logger_pipe_line_timeout", "tests/test_local.py::TestLocalMachine::test_arg_expansion", "tests/test_local.py::TestLocalMachine::test_session", "tests/test_local.py::TestLocalMachine::test_quoting", "tests/test_local.py::TestLocalMachine::test_exception_pickling", 
"tests/test_local.py::TestLocalMachine::test_tempdir", "tests/test_local.py::TestLocalMachine::test_direct_open_tmpdir", "tests/test_local.py::TestLocalMachine::test_read_write_str", "tests/test_local.py::TestLocalMachine::test_read_write_unicode", "tests/test_local.py::TestLocalMachine::test_read_write_bin", "tests/test_local.py::TestLocalMachine::test_links", "tests/test_local.py::TestLocalMachine::test_list_processes", "tests/test_local.py::TestLocalMachine::test_pgrep", "tests/test_local.py::TestLocalMachine::test_local_daemon", "tests/test_local.py::TestLocalMachine::test_atomic_file", "tests/test_local.py::TestLocalMachine::test_atomic_file2", "tests/test_local.py::TestLocalMachine::test_pid_file", "tests/test_local.py::TestLocalMachine::test_atomic_counter", "tests/test_local.py::TestLocalMachine::test_atomic_counter2", "tests/test_local.py::TestLocalMachine::test_bound_env", "tests/test_local.py::TestLocalMachine::test_nesting_lists_as_argv", "tests/test_local.py::TestLocalMachine::test_contains_ls", "tests/test_local.py::TestLocalMachine::test_issue_139", "tests/test_local.py::TestLocalMachine::test_pipeline_failure", "tests/test_local.py::TestLocalMachine::test_cmd", "tests/test_local.py::TestLocalMachine::test_pipeline_retcode", "tests/test_local.py::TestLocalMachine::test_pipeline_stdin", "tests/test_local.py::TestLocalMachine::test_run_bg", "tests/test_local.py::TestLocalMachine::test_run_fg", "tests/test_local.py::TestLocalMachine::test_run_tee", "tests/test_local.py::TestLocalMachine::test_run_tf", "tests/test_local.py::TestLocalMachine::test_run_retcode", "tests/test_local.py::TestLocalMachine::test_run_nohup", "tests/test_local.py::TestLocalEncoding::test_inout_rich", "tests/test_local.py::TestLocalEncoding::test_out_rich", "tests/test_local.py::TestLocalEncoding::test_runfile_rich", "tests/test_local.py::test_local_glob_path" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2022-06-22T20:56:50Z"
mit
tomerfiliba__plumbum-621
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f65c073..e53a554 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -33,7 +33,7 @@ jobs: - name: pylint run: | echo "::add-matcher::$GITHUB_WORKSPACE/.github/matchers/pylint.json" - pipx run nox -s pylint + pipx run --python python nox -s pylint tests: name: Tests on 🐍 ${{ matrix.python-version }} ${{ matrix.os }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3f5f487..3770b2e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -81,7 +81,6 @@ repos: - id: python-check-blanket-noqa - id: python-check-blanket-type-ignore - id: python-no-log-warn - - id: python-no-eval - id: python-use-type-annotations - id: rst-backticks - id: rst-directive-colons diff --git a/plumbum/cli/application.py b/plumbum/cli/application.py index 788ccb5..d83e6f1 100644 --- a/plumbum/cli/application.py +++ b/plumbum/cli/application.py @@ -501,6 +501,12 @@ class Application: ) m = inspect.getfullargspec(self.main) + + if sys.version_info < (3, 10): + sig = inspect.signature(self.main) + else: + sig = inspect.signature(self.main, eval_str=True) + max_args = sys.maxsize if m.varargs else len(m.args) - 1 min_args = len(m.args) - 1 - (len(m.defaults) if m.defaults else 0) if len(tailargs) < min_args: @@ -530,17 +536,24 @@ class Application: m.varargs, ) - elif hasattr(m, "annotations"): + elif hasattr(m, "annotations") and m.annotations: args_names = list(m.args[1:]) positional = [None] * len(args_names) varargs = None # All args are positional, so convert kargs to positional for item in m.annotations: + annotation = ( + sig.parameters[item].annotation + if item != "return" + else sig.return_annotation + ) + if sys.version_info < (3, 10) and isinstance(annotation, str): + annotation = eval(annotation) if item == m.varargs: - varargs = m.annotations[item] + varargs = annotation elif item != "return": - positional[args_names.index(item)] = m.annotations[item] + positional[args_names.index(item)] = annotation tailargs = self._positional_validate( tailargs, positional, varargs, m.args[1:], m.varargs diff --git a/pyproject.toml b/pyproject.toml index bf9042f..8a7ee77 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -106,4 +106,5 @@ messages_control.disable = [ "unidiomatic-typecheck", # TODO: might be able to remove "unnecessary-lambda-assignment", # TODO: 4 instances "unused-import", # identical to flake8 but has typing false positives + "eval-used", # Needed for Python <3.10 annotations ]
tomerfiliba/plumbum
da87b67d3efcaa61554756d56775ac44a3379c00
diff --git a/tests/test_3_cli.py b/tests/test_3_cli.py index 9b8af84..279a56b 100644 --- a/tests/test_3_cli.py +++ b/tests/test_3_cli.py @@ -14,7 +14,7 @@ class TestProg3: class Main4Validator(cli.Application): - def main(self, myint: int, myint2: int, *mylist: int) -> None: + def main(self, myint: int, myint2: int, *mylist: "int") -> None: print(myint, myint2, mylist)
Future support for from __future__ import annotations I added `YAMLWizard` to some code using plumbum's `cli`, and after adding the required `__future__` line I discovered that my `main` function wouldn't typecheck its command line arguments anymore. Picture something like this: ``` from __future__ import annotations from plumbum import cli # import YAMLWizard # skipped for simplicity class app(cli.Application): def main(self, *dirs: cli.ExistingDirectory): pass ``` I hope it's clear what's expected here. Anyhow, what actually happens with the `annotations` feature is that the annotations are stored as strings (PEP 563 deferred evaluation) rather than evaluated objects, so they no longer work as validators. Instead of typechecking the command line as expected, the program always terminates with a runtime error that "str object is not callable." Workarounds: 1. Just separate `cli.Application` and `__future__.annotations` use into separate modules. It's what I'm doing now. 2. Don't typecheck the command line. I did that for a bit, but it's annoying.
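The mechanism behind the failure: under PEP 563, `inspect.getfullargspec` reports annotations as plain strings, so plumbum ends up "calling" a string; the patch resolves them via `inspect.signature(..., eval_str=True)` on 3.10+ (and `eval` below that). A stdlib-only demonstration:

```python
from __future__ import annotations

import inspect
import sys


def main(self, *dirs: int):
    pass


spec = inspect.getfullargspec(main)
print(spec.annotations["dirs"])  # 'int' -- a string, not a callable type

# Python 3.10+ can resolve the deferred strings back into real objects:
if sys.version_info >= (3, 10):
    sig = inspect.signature(main, eval_str=True)
    print(sig.parameters["dirs"].annotation)  # <class 'int'>
```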
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_3_cli.py::TestProg4::test_prog" ]
[ "tests/test_3_cli.py::TestProg3::test_prog" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2022-10-05T21:19:23Z"
mit
tomerfiliba__plumbum-661
diff --git a/docs/index.rst b/docs/index.rst index 78bf283..c1b2ed0 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -166,7 +166,7 @@ that I wrote for `RPyC <https://rpyc.readthedocs.io/>`_. When I combined the two Credits ======= -The project has been inspired by **PBS** (now called `sh <http://amoffat.github.io/sh/>`_) +The project has been inspired by **PBS** (now called `sh <http://sh.rtfd.org>`_) of `Andrew Moffat <https://github.com/amoffat>`_, and has borrowed some of his ideas (namely treating programs like functions and the nice trick for importing commands). However, I felt there was too much magic going on in PBS, diff --git a/plumbum/commands/base.py b/plumbum/commands/base.py index 52c0f26..3172c52 100644 --- a/plumbum/commands/base.py +++ b/plumbum/commands/base.py @@ -398,8 +398,6 @@ class Pipeline(BaseCommand): dstproc = self.dstcmd.popen(**kwargs) # allow p1 to receive a SIGPIPE if p2 exits srcproc.stdout.close() - if srcproc.stderr is not None: - dstproc.stderr = srcproc.stderr if srcproc.stdin and src_kwargs.get("stdin") != PIPE: srcproc.stdin.close() dstproc.srcproc = srcproc diff --git a/plumbum/commands/modifiers.py b/plumbum/commands/modifiers.py index 98b3749..af59071 100644 --- a/plumbum/commands/modifiers.py +++ b/plumbum/commands/modifiers.py @@ -226,6 +226,7 @@ class _TEE(ExecutionModifier): buf.append(data) + p.wait() # To get return code in p stdout = "".join([x.decode("utf-8") for x in outbuf]) stderr = "".join([x.decode("utf-8") for x in errbuf]) return p.returncode, stdout, stderr diff --git a/plumbum/commands/processes.py b/plumbum/commands/processes.py index 802ede4..89c2763 100644 --- a/plumbum/commands/processes.py +++ b/plumbum/commands/processes.py @@ -18,14 +18,44 @@ def _check_process(proc, retcode, timeout, stdout, stderr): return proc.returncode, stdout, stderr +def _get_piped_streams(proc): + """Get a list of all valid standard streams for proc that were opened with PIPE option. + + If proc was started from a Pipeline command, this function assumes it will have a + "srcproc" member pointing to the previous command in the pipeline. That link will + be used to traverse all started processes started from the pipeline, the list will + include stdout/stderr streams opened as PIPE for all commands in the pipeline. + If that was not the case, some processes could write to pipes no one reads from + which would result in process stalling after the pipe's buffer is filled. 
+ + Streams that were closed (because they were redirected to the input of a subsequent command) + are not included in the result + """ + streams = [] + + def add_stream(type_, stream): + if stream is None or stream.closed: + return + streams.append((type_, stream)) + + while proc: + add_stream(1, proc.stderr) + add_stream(0, proc.stdout) + proc = getattr(proc, "srcproc", None) + + return streams + + def _iter_lines_posix(proc, decode, linesize, line_timeout=None): from selectors import EVENT_READ, DefaultSelector + streams = _get_piped_streams(proc) + # Python 3.4+ implementation def selector(): sel = DefaultSelector() - sel.register(proc.stdout, EVENT_READ, 0) - sel.register(proc.stderr, EVENT_READ, 1) + for stream_type, stream in streams: + sel.register(stream, EVENT_READ, stream_type) while True: ready = sel.select(line_timeout) if not ready and line_timeout: @@ -41,10 +71,9 @@ def _iter_lines_posix(proc, decode, linesize, line_timeout=None): yield ret if proc.poll() is not None: break - for line in proc.stdout: - yield 0, decode(line) - for line in proc.stderr: - yield 1, decode(line) + for stream_type, stream in streams: + for line in stream: + yield stream_type, decode(line) def _iter_lines_win32(proc, decode, linesize, line_timeout=None):
tomerfiliba/plumbum
668d98aea069f7b4ca20607694038166a93a1cf2
diff --git a/tests/test_local.py b/tests/test_local.py index 889624a..3b5eb51 100644 --- a/tests/test_local.py +++ b/tests/test_local.py @@ -607,7 +607,7 @@ class TestLocalMachine: @skip_on_windows def test_modifiers(self): - from plumbum.cmd import grep, ls + from plumbum.cmd import cat, grep, ls f = (ls["-a"] | grep["\\.py"]) & BG f.wait() @@ -615,11 +615,17 @@ class TestLocalMachine: command = ls["-a"] | grep["local"] command_false = ls["-a"] | grep["not_a_file_here"] + command_false_2 = command_false | cat command & FG assert command & TF assert not (command_false & TF) + assert not (command_false_2 & TF) assert command & RETCODE == 0 assert command_false & RETCODE == 1 + assert command_false_2 & RETCODE == 1 + assert (command & TEE)[0] == 0 + assert (command_false & TEE(retcode=None))[0] == 1 + assert (command_false_2 & TEE(retcode=None))[0] == 1 @skip_on_windows def test_tee_modifier(self, capfd): diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py new file mode 100644 index 0000000..8ea47e2 --- /dev/null +++ b/tests/test_pipelines.py @@ -0,0 +1,87 @@ +from typing import List, Tuple + +import pytest + +import plumbum +from plumbum._testtools import skip_on_windows +from plumbum.commands import BaseCommand + + +@skip_on_windows [email protected](3) +def test_draining_stderr(generate_cmd, process_cmd): + stdout, stderr = get_output_with_iter_lines( + generate_cmd | process_cmd | process_cmd + ) + expected_output = {f"generated {i}" for i in range(5000)} + expected_output.update(f"consumed {i}" for i in range(5000)) + assert set(stderr) - expected_output == set() + assert len(stderr) == 15000 + assert len(stdout) == 5000 + + +@skip_on_windows [email protected](3) +def test_draining_stderr_with_stderr_redirect(tmp_path, generate_cmd, process_cmd): + stdout, stderr = get_output_with_iter_lines( + generate_cmd | (process_cmd >= str(tmp_path / "output.txt")) | process_cmd + ) + expected_output = {f"generated {i}" for i in range(5000)} + expected_output.update(f"consumed {i}" for i in range(5000)) + assert set(stderr) - expected_output == set() + assert len(stderr) == 10000 + assert len(stdout) == 5000 + + +@skip_on_windows [email protected](3) +def test_draining_stderr_with_stdout_redirect(tmp_path, generate_cmd, process_cmd): + stdout, stderr = get_output_with_iter_lines( + generate_cmd | process_cmd | process_cmd > str(tmp_path / "output.txt") + ) + expected_output = {f"generated {i}" for i in range(5000)} + expected_output.update(f"consumed {i}" for i in range(5000)) + assert set(stderr) - expected_output == set() + assert len(stderr) == 15000 + assert len(stdout) == 0 + + [email protected]() +def generate_cmd(tmp_path): + generate = tmp_path / "generate.py" + generate.write_text( + """\ +import sys +for i in range(5000): + print("generated", i, file=sys.stderr) + print(i) +""" + ) + return plumbum.local["python"][generate] + + [email protected]() +def process_cmd(tmp_path): + process = tmp_path / "process.py" + process.write_text( + """\ +import sys +for line in sys.stdin: + i = line.strip() + print("consumed", i, file=sys.stderr) + print(i) +""" + ) + return plumbum.local["python"][process] + + +def get_output_with_iter_lines(cmd: BaseCommand) -> Tuple[List[str], List[str]]: + stderr, stdout = [], [] + proc = cmd.popen() + for stdout_line, stderr_line in proc.iter_lines(retcode=[0, None]): + if stderr_line: + stderr.append(stderr_line) + if stdout_line: + stdout.append(stdout_line) + proc.wait() + return stdout, stderr
TEE return code for pipeline is wrong when non-last command fails Based on #145 ```py from plumbum.cmd import yes, head from plumbum import TEE, RETCODE chain = yes['-x'] | head print(chain & RETCODE) # prints 1 with chain.bgrun(retcode=None) as p: print(p.run()[0]) # prints 1 print((chain & TEE(retcode=None))[0]) # prints 0 ```
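Part of what the patch fixes is plain `subprocess` behaviour: `Popen.returncode` stays `None` until the child is reaped by `poll()`/`wait()`, which is why `_TEE` gained the `p.wait()  # To get return code in p` call. A stdlib-only illustration:

```python
import subprocess
import sys
import time

# A child process that exits immediately with status 1.
p = subprocess.Popen([sys.executable, "-c", "raise SystemExit(1)"])
time.sleep(1)          # the child has certainly exited by now...
print(p.returncode)    # ...but returncode is still None: not reaped yet
p.wait()               # reap the child
print(p.returncode)    # 1
```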
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_local.py::TestLocalMachine::test_modifiers", "tests/test_pipelines.py::test_draining_stderr", "tests/test_pipelines.py::test_draining_stderr_with_stderr_redirect", "tests/test_pipelines.py::test_draining_stderr_with_stdout_redirect" ]
[ "tests/test_local.py::TestLocalPopen::test_contextmanager", "tests/test_local.py::TestLocalPath::test_name", "tests/test_local.py::TestLocalPath::test_dirname", "tests/test_local.py::TestLocalPath::test_uri", "tests/test_local.py::TestLocalPath::test_pickle", "tests/test_local.py::TestLocalPath::test_empty", "tests/test_local.py::TestLocalPath::test_chown", "tests/test_local.py::TestLocalPath::test_split", "tests/test_local.py::TestLocalPath::test_suffix", "tests/test_local.py::TestLocalPath::test_newname", "tests/test_local.py::TestLocalPath::test_relative_to", "tests/test_local.py::TestLocalPath::test_read_write", "tests/test_local.py::TestLocalPath::test_parts", "tests/test_local.py::TestLocalPath::test_iterdir", "tests/test_local.py::TestLocalPath::test_stem", "tests/test_local.py::TestLocalPath::test_root_drive", "tests/test_local.py::TestLocalPath::test_compare_pathlib", "tests/test_local.py::TestLocalPath::test_suffix_expected", "tests/test_local.py::TestLocalPath::test_touch", "tests/test_local.py::TestLocalPath::test_copy_override", "tests/test_local.py::TestLocalPath::test_copy_nonexistant_dir", "tests/test_local.py::TestLocalPath::test_unlink", "tests/test_local.py::TestLocalPath::test_unhashable", "tests/test_local.py::TestLocalPath::test_getpath", "tests/test_local.py::TestLocalPath::test_path_dir", "tests/test_local.py::TestLocalPath::test_mkdir", "tests/test_local.py::TestLocalPath::test_mkdir_mode", "tests/test_local.py::TestLocalPath::test_str_getitem", "tests/test_local.py::TestLocalPath::test_fspath", "tests/test_local.py::TestLocalMachine::test_getattr", "tests/test_local.py::TestLocalMachine::test_imports", "tests/test_local.py::TestLocalMachine::test_pathlib", "tests/test_local.py::TestLocalMachine::test_get", "tests/test_local.py::TestLocalMachine::test_shadowed_by_dir", "tests/test_local.py::TestLocalMachine::test_repr_command", "tests/test_local.py::TestLocalMachine::test_cwd", "tests/test_local.py::TestLocalMachine::test_mixing_chdir", "tests/test_local.py::TestLocalMachine::test_contains", "tests/test_local.py::TestLocalMachine::test_path", "tests/test_local.py::TestLocalMachine::test_glob_spaces", "tests/test_local.py::TestLocalMachine::test_env", "tests/test_local.py::TestLocalMachine::test_local", "tests/test_local.py::TestLocalMachine::test_piping", "tests/test_local.py::TestLocalMachine::test_redirection", "tests/test_local.py::TestLocalMachine::test_popen", "tests/test_local.py::TestLocalMachine::test_run", "tests/test_local.py::TestLocalMachine::test_timeout", "tests/test_local.py::TestLocalMachine::test_pipe_stderr", "tests/test_local.py::TestLocalMachine::test_fair_error_attribution", "tests/test_local.py::TestLocalMachine::test_iter_lines_timeout", "tests/test_local.py::TestLocalMachine::test_iter_lines_buffer_size", "tests/test_local.py::TestLocalMachine::test_iter_lines_timeout_by_type", "tests/test_local.py::TestLocalMachine::test_iter_lines_error", "tests/test_local.py::TestLocalMachine::test_iter_lines_line_timeout", "tests/test_local.py::TestLocalMachine::test_tee_modifier", "tests/test_local.py::TestLocalMachine::test_tee_race", "tests/test_local.py::TestLocalMachine::test_redirection_stdin_modifiers_fg[modifier0-None]", "tests/test_local.py::TestLocalMachine::test_redirection_stdin_modifiers_fg[modifier1-True]", "tests/test_local.py::TestLocalMachine::test_redirection_stdin_modifiers_fg[modifier2-0]", "tests/test_local.py::TestLocalMachine::test_redirection_stdin_modifiers_fg[modifier3-expected3]", 
"tests/test_local.py::TestLocalMachine::test_logger_pipe", "tests/test_local.py::TestLocalMachine::test_logger_pipe_line_timeout", "tests/test_local.py::TestLocalMachine::test_arg_expansion", "tests/test_local.py::TestLocalMachine::test_session", "tests/test_local.py::TestLocalMachine::test_quoting", "tests/test_local.py::TestLocalMachine::test_exception_pickling", "tests/test_local.py::TestLocalMachine::test_tempdir", "tests/test_local.py::TestLocalMachine::test_direct_open_tmpdir", "tests/test_local.py::TestLocalMachine::test_read_write_str", "tests/test_local.py::TestLocalMachine::test_read_write_unicode", "tests/test_local.py::TestLocalMachine::test_read_write_bin", "tests/test_local.py::TestLocalMachine::test_links", "tests/test_local.py::TestLocalMachine::test_list_processes", "tests/test_local.py::TestLocalMachine::test_pgrep", "tests/test_local.py::TestLocalMachine::test_local_daemon", "tests/test_local.py::TestLocalMachine::test_atomic_file", "tests/test_local.py::TestLocalMachine::test_atomic_file2", "tests/test_local.py::TestLocalMachine::test_pid_file", "tests/test_local.py::TestLocalMachine::test_atomic_counter", "tests/test_local.py::TestLocalMachine::test_atomic_counter2", "tests/test_local.py::TestLocalMachine::test_bound_env", "tests/test_local.py::TestLocalMachine::test_nesting_lists_as_argv", "tests/test_local.py::TestLocalMachine::test_contains_ls", "tests/test_local.py::TestLocalMachine::test_issue_139", "tests/test_local.py::TestLocalMachine::test_pipeline_failure", "tests/test_local.py::TestLocalMachine::test_cmd", "tests/test_local.py::TestLocalMachine::test_pipeline_retcode", "tests/test_local.py::TestLocalMachine::test_pipeline_stdin", "tests/test_local.py::TestLocalMachine::test_run_bg", "tests/test_local.py::TestLocalMachine::test_run_fg", "tests/test_local.py::TestLocalMachine::test_run_tee", "tests/test_local.py::TestLocalMachine::test_run_tf", "tests/test_local.py::TestLocalMachine::test_run_retcode", "tests/test_local.py::TestLocalMachine::test_run_nohup", "tests/test_local.py::TestLocalEncoding::test_inout_rich", "tests/test_local.py::TestLocalEncoding::test_out_rich", "tests/test_local.py::TestLocalEncoding::test_runfile_rich", "tests/test_local.py::test_local_glob_path" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-10-31T20:34:24Z"
mit
tommyjcarpenter__osmtogeojson-8
diff --git a/Changelog.md b/Changelog.md index 60d3034..0be1e85 100644 --- a/Changelog.md +++ b/Changelog.md @@ -2,7 +2,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) -and this project adheres to [Semantic Versioning](http://semver.org/). +and this project will later on adhere to [Semantic Versioning](http://semver.org/). + +Note: A change of a minor/patch version still may result in changed GeoJSON export results! ## [0.0.1] * Project creation +## [0.0.2] + * Prefix osm "id" property by an "at"-sign (fixes #5) + * Create GeometryCollection for relations with mixed member types (fixes #3) diff --git a/osmtogeojson/osmtogeojson.py b/osmtogeojson/osmtogeojson.py index 36eb032..6a05cd3 100644 --- a/osmtogeojson/osmtogeojson.py +++ b/osmtogeojson/osmtogeojson.py @@ -1,5 +1,8 @@ +import logging from osmtogeojson import merge +logger = logging.getLogger(__name__) + def _determine_feature_type(way_nodes): # get more advanced??? if way_nodes[0] == way_nodes[-1]: @@ -65,6 +68,7 @@ def _process_relations(resulting_geojson, relation_storage, way_storage, node_st way_types = [] way_coordinate_blocks = [] + only_way_members = True for mem in r["members"]: if mem["type"] == "way": way_id = mem["ref"] @@ -73,7 +77,7 @@ def _process_relations(resulting_geojson, relation_storage, way_storage, node_st way_coordinate_blocks.append(processed["geometry"]["coordinates"]) ways_used_in_relations[way_id] = 1 else: - print(mem["type"]) + only_way_members = False rel["geometry"] = {} @@ -86,8 +90,31 @@ def _process_relations(resulting_geojson, relation_storage, way_storage, node_st rel["geometry"]["coordinates"] = [x for x in way_coordinate_blocks] merge.merge_line_string(rel) else: - print(way_types) - + # relation does not consist of Polygons or LineStrings only... + # In this case, overpass reports every individual member with its relation reference + # Another option would be to export such a relation as GeometryCollection + + rel["geometry"]["type"] = "GeometryCollection" + member_geometries = [] + for mem in r["members"]: + if mem["type"] == "way": + way_id = mem["ref"] + processed = _process_single_way(way_id, way_storage[way_id], node_storage, nodes_used_in_ways) + member_geometries.append(processed["geometry"]) + elif mem["type"] == "node": + node_id = mem["ref"] + node = node_storage[node_id] + geometry = {} + geometry["type"] = "Point" + geometry["coordinates"] = [node["lon"], node["lat"]] + member_geometries.append(geometry) + # Well, used_in_rels, but we want to ignore it as well, don't we? + nodes_used_in_ways[node_id] = 1 + else: + logger.warn("Relations members not yet handled (%s)", rel_id) + + rel["geometry"]["geometries"] = member_geometries + resulting_geojson["features"].append(rel) return ways_used_in_relations diff --git a/setup.py b/setup.py index 78fbb6b..ef27f82 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ from setuptools import setup, find_packages setup( name='osmtogeojson', - version='0.0.1', + version='0.0.2', packages=find_packages(exclude=["tests.*", "tests"]), author="Tommy Carpenter", author_email="",
tommyjcarpenter/osmtogeojson
128ec15acd298926dc9070b91ce45546a581b90f
diff --git a/tests/fixtures/geomcollection_geojson.json b/tests/fixtures/geomcollection_geojson.json new file mode 100644 index 0000000..a7e435f --- /dev/null +++ b/tests/fixtures/geomcollection_geojson.json @@ -0,0 +1,82 @@ +{ + "type": "FeatureCollection", + "features": [ + { + "type": "Feature", + "id": "relation/5576903", + "properties": { + "@id": "relation/5576903", + "amenity": "parking", + "fee": "yes", + "name": "Schillerplatz", + "opening_hours": "24/7", + "operator": "APCOA PARKING Deutschland GmbH", + "parking": "underground", + "site": "parking", + "type": "site", + "url": "https://service.stuttgart.de/lhs-services/ivlz/index.php?uid=35&objectid=108523&objecttype=dept&page=svc&servicetype=parking&serviceid=9356&detailid=65&showservice=1", + "website": "https://www.apcoa.de/parken-in/stuttgart/schillerplatz.html" + }, + "geometry": { + "geometries": [ + { + "coordinates": [ + [ + 9.1785771, + 48.7769778 + ], + [ + 9.1785466, + 48.7769462 + ] + ], + "type": "LineString" + }, + { + "coordinates": [ + [ + [ + 9.178599, + 48.7769776 + ], + [ + 9.1785505, + 48.7769354 + ], + [ + 9.1785521, + 48.7769096 + ], + [ + 9.1785628, + 48.776876 + ], + [ + 9.1786147, + 48.7768176 + ], + [ + 9.1786676, + 48.7767825 + ], + [ + 9.178599, + 48.7769776 + ] + ] + ], + "type": "Polygon" + }, + { + "coordinates": [ + 9.1786676, + 48.7767825 + ], + "type": "Point" + } + ], + "type": "GeometryCollection" + } + } + ] +} \ No newline at end of file diff --git a/tests/fixtures/geomcollection_overpass.json b/tests/fixtures/geomcollection_overpass.json new file mode 100644 index 0000000..c6e803f --- /dev/null +++ b/tests/fixtures/geomcollection_overpass.json @@ -0,0 +1,144 @@ +{ + "version": 0.6, + "generator": "Overpass API 0.7.55.6 486819c8", + "osm3s": { + "timestamp_osm_base": "2019-04-25T18:26:02Z", + "copyright": "The data included in this document is from www.openstreetmap.org. The data is made available under ODbL." 
+ }, + "notes": "This is a shortened test fixture, the real feature has some more members...", + "elements": [ +{ + "type": "node", + "id": 20850150, + "lat": 48.7767825, + "lon": 9.1786676, + "tags": { + "amenity": "parking_entrance", + "foot": "yes", + "maxheight": "2", + "parking": "underground" + } +}, +{ + "type": "node", + "id": 3801571628, + "lat": 48.7768176, + "lon": 9.1786147 +}, +{ + "type": "node", + "id": 3801571632, + "lat": 48.7768760, + "lon": 9.1785628 +}, +{ + "type": "node", + "id": 3817566170, + "lat": 48.7769096, + "lon": 9.1785521, + "tags": { + "barrier": "lift_gate", + "layer": "-1", + "location": "underground" + } +}, +{ + "type": "node", + "id": 3801571640, + "lat": 48.7769354, + "lon": 9.1785505 +}, +{ + "type": "node", + "id": 3801569051, + "lat": 48.7769776, + "lon": 9.1785990 +}, +{ + "type": "node", + "id": 3801571641, + "lat": 48.7769462, + "lon": 9.1785466 +}, +{ + "type": "node", + "id": 3801571647, + "lat": 48.7769778, + "lon": 9.1785771 +}, +{ + "type": "way", + "id": 376770460, + "nodes": [ + 3801571647, + 3801571641 + ], + "tags": { + "covered": "yes", + "highway": "footway", + "indoor": "yes", + "layer": "-2", + "location": "underground", + "ref": "Ebene B" + } +}, +{ + "type": "way", + "id": 376770534, + "nodes": [ + 3801569051, + 3801571640, + 3817566170, + 3801571632, + 3801571628, + 20850150, + 3801569051 + ], + "tags": { + "highway": "service", + "incline": "up", + "layer": "-1", + "location": "underground", + "oneway": "yes", + "service": "driveway", + "sidewalk": "right", + "tunnel": "yes" + } +}, +{ + "type": "relation", + "id": 5576903, + "members": [ + { + "type": "way", + "ref": 376770460, + "role": "" + }, + { + "type": "way", + "ref": 376770534, + "role": "" + }, + { + "type": "node", + "ref": 20850150, + "role": "exit" + } + ], + "tags": { + "amenity": "parking", + "fee": "yes", + "name": "Schillerplatz", + "opening_hours": "24/7", + "operator": "APCOA PARKING Deutschland GmbH", + "parking": "underground", + "site": "parking", + "type": "site", + "url": "https://service.stuttgart.de/lhs-services/ivlz/index.php?uid=35&objectid=108523&objecttype=dept&page=svc&servicetype=parking&serviceid=9356&detailid=65&showservice=1", + "website": "https://www.apcoa.de/parken-in/stuttgart/schillerplatz.html" + } +} + + ] +} + diff --git a/tests/test_conversion.py b/tests/test_conversion.py new file mode 100644 index 0000000..69eafb8 --- /dev/null +++ b/tests/test_conversion.py @@ -0,0 +1,31 @@ +import json +import unittest + +from osmtogeojson import osmtogeojson + +class ConversionTest(unittest.TestCase): + + # We want to see the differences + maxDiff = None + + def test_relation_with_different_member_types_becomes_GeometryCollection(self): + self.compare_files("geomcollection_overpass.json", "geomcollection_geojson.json") + + def not_yet_test_np(self): + self.compare_files("np_overpass.json", "np_geojson.json") + + def not_yet_test_summitschool(self): + self.compare_files("summitschool_overpass.json", "summitschool_geojson.json") + + def compare_files(self, inputfile, geojsonfile): + with open("tests/fixtures/" + inputfile, "r") as f: + osm_json = json.loads(f.read()) + + with open("tests/fixtures/" + geojsonfile, "r") as f: + expected_geojson = json.loads(f.read()) + + actual_geojson = osmtogeojson.process_osm_json(osm_json) + self.assertEqual(actual_geojson, expected_geojson) + +if __name__ == '__main__': + unittest.main() \ No newline at end of file
Relations with LineString and Polygon members have no geometry If a relation has Polygon and LineString way members, the resulting feature has no geometry. tyrasd/osmtogeojson instead seems to export every member as a feature with an @relations property. osmtogeojson should either behave accordingly or provide all member geometries as a GeometryCollection. Example for [relation/5576903](https://openstreetmap.org/relation/5576903): ``` { "type": "Feature", "properties": { "@id": "way/376770457", "covered": "yes", "highway": "service", "indoor": "yes", "layer": "-2", "location": "underground", "oneway": "yes", "ref": "Ebene B", "service": "parking_aisle", "sidewalk": "left", "@relations": [ { "role": "", "rel": 5576903, "reltags": { "amenity": "parking", "fee": "yes", "name": "Schillerplatz", "opening_hours": "24/7", "operator": "APCOA PARKING Deutschland GmbH", "parking": "underground", "site": "parking", "type": "site", "url": "https://service.stuttgart.de/lhs-services/ivlz/index.php?uid=35&objectid=108523&objecttype=dept&page=svc&servicetype=parking&serviceid=9356&detailid=65&showservice=1", "website": "https://www.apcoa.de/parken-in/stuttgart/schillerplatz.html" } } ] }, "geometry": { "type": "Polygon", "coordinates": [ [ [ 9.178458, 48.7770338 ], ... ]] } } ```
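A condensed, self-contained sketch of the GeometryCollection approach the patch takes; `relation_to_geometry_collection` is a hypothetical name, and the dict parameters stand in for the module's internal node/way storages:

```python
def relation_to_geometry_collection(relation, node_storage, way_geometry):
    """Fold mixed relation members into one GeoJSON GeometryCollection."""
    geometries = []
    for mem in relation["members"]:
        if mem["type"] == "node":
            node = node_storage[mem["ref"]]
            geometries.append({"type": "Point",
                               "coordinates": [node["lon"], node["lat"]]})
        elif mem["type"] == "way":
            # Already-built LineString or Polygon geometry for this way.
            geometries.append(way_geometry[mem["ref"]])
    return {"type": "GeometryCollection", "geometries": geometries}


rel = {"members": [{"type": "node", "ref": 1}, {"type": "way", "ref": 10}]}
nodes = {1: {"lon": 9.1786676, "lat": 48.7767825}}
ways = {10: {"type": "LineString",
             "coordinates": [[9.1785771, 48.7769778], [9.1785466, 48.7769462]]}}
print(relation_to_geometry_collection(rel, nodes, ways))
```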
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_conversion.py::ConversionTest::test_relation_with_different_member_types_becomes_GeometryCollection" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-04-26T12:03:29Z"
mit
tonioo__sievelib-79
diff --git a/README.rst b/README.rst index 68ff377..e278c60 100644 --- a/README.rst +++ b/README.rst @@ -38,12 +38,13 @@ The following extensions are also supported: * Copying Without Side Effects (`RFC 3894 <https://tools.ietf.org/html/rfc3894>`_) * Body (`RFC 5173 <https://tools.ietf.org/html/rfc5173>`_) -* Date and Index (`RFC 5260 <https://tools.ietf.org/html/rfc5260>`_) * Vacation (`RFC 5230 <http://tools.ietf.org/html/rfc5230>`_) +* Relational (`RFC 5231 <https://tools.ietf.org/html/rfc5231>`_) * Imap4flags (`RFC 5232 <https://tools.ietf.org/html/rfc5232>`_) The following extensions are partially supported: +* Date and Index (`RFC 5260 <https://tools.ietf.org/html/rfc5260>`_) * Checking Mailbox Status and Accessing Mailbox Metadata (`RFC 5490 <https://tools.ietf.org/html/rfc5490>`_) Extending the parser diff --git a/sievelib/commands.py b/sievelib/commands.py index 7f6df2e..d26bcb5 100644 --- a/sievelib/commands.py +++ b/sievelib/commands.py @@ -98,10 +98,21 @@ address_part = {"name": "address-part", "values": [":localpart", ":domain", ":all"], "type": ["tag"], "required": False} -match_type = {"name": "match-type", - "values": [":is", ":contains", ":matches"], - "type": ["tag"], - "required": False} +match_type = { + "name": "match-type", + "values": [":is", ":contains", ":matches"], + "extension_values": { + ":count": "relational", + ":value": "relational" + }, + "extra_arg": { + "type": "string", + "values": ['"gt"', '"ge"', '"lt"', '"le"', '"eq"', '"ne"'], + "valid_for": [":count", ":value"] + }, + "type": ["tag"], + "required": False +} class Command(object): @@ -343,9 +354,17 @@ class Command(object): :param value: the value to check :return: True on succes, False otherwise """ - if "values" not in arg: + if "values" not in arg and "extension_values" not in arg: + return True + if "values" in arg and value.lower() in arg["values"]: return True - return value.lower() in arg["values"] + if "extension_values" in arg: + extension = arg["extension_values"].get(value.lower()) + if extension: + if extension not in RequireCommand.loaded_extensions: + raise ExtensionNotLoaded(extension) + return True + return False def __is_valid_type(self, typ, typlist): """ Check if type is valid based on input type list @@ -431,7 +450,6 @@ class Command(object): condition = ( atype in curarg["type"] and - ("values" not in curarg or avalue in curarg["values"]) and self.__is_valid_value_for_arg(curarg, avalue) ) if condition: @@ -892,6 +910,59 @@ class HasflagCommand(TestCommand): self.rargs_cnt = 1 +class DateCommand(TestCommand): + """date command, part of the date extension. + + https://tools.ietf.org/html/rfc5260#section-4 + """ + + extension = "date" + args_definition = [ + {"name": "zone", + "type": ["tag"], + "write_tag": True, + "values": [":zone", ":originalzone"], + "extra_arg": {"type": "string", "valid_for": [":zone"]}, + "required": False}, + comparator, + match_type, + {"name": "header-name", + "type": ["string"], + "required": True}, + {"name": "date-part", + "type": ["string"], + "required": True}, + {"name": "key-list", + "type": ["string", "stringlist"], + "required": True} + ] + + +class CurrentdateCommand(TestCommand): + """currentdate command, part of the date extension. 
+ + http://tools.ietf.org/html/rfc5260#section-5 + """ + + extension = "date" + args_definition = [ + {"name": "zone", + "type": ["tag"], + "write_tag": True, + "values": [":zone"], + "extra_arg": {"type": "string"}, + "required": False}, + comparator, + match_type, + {"name": "date-part", + "type": ["string"], + "required": True}, + {"name": "key-list", + "type": ["string", "stringlist"], + "required": True} + ] + + class VacationCommand(ActionCommand): args_definition = [ {"name": "subject", @@ -937,7 +1008,7 @@ class VacationCommand(ActionCommand): class SetCommand(ControlCommand): - """currentdate command, part of the variables extension + """set command, part of the variables extension http://tools.ietf.org/html/rfc5229 """ @@ -953,37 +1024,6 @@ class SetCommand(ControlCommand): ] -class CurrentdateCommand(ControlCommand): - - """currentdate command, part of the date extension - - http://tools.ietf.org/html/rfc5260#section-5 - """ - - extension = "date" - accept_children = True - args_definition = [ - {"name": "zone", - "type": ["tag"], - "write_tag": True, - "values": [":zone"], - "extra_arg": {"type": "string"}, - "required": False}, - {"name": "match-value", - "type": ["tag"], - "required": True}, - {"name": "comparison", - "type": ["string"], - "required": True}, - {"name": "match-against", - "type": ["string"], - "required": True}, - {"name": "match-against-field", - "type": ["string"], - "required": True} - ] - - def add_commands(cmds): """ Adds one or more commands to the module namespace.
tonioo/sievelib
64ff2381866f7c820d579ce980eeec87d76a3f0d
diff --git a/sievelib/tests/test_parser.py b/sievelib/tests/test_parser.py index 65e6393..430cdf1 100644 --- a/sievelib/tests/test_parser.py +++ b/sievelib/tests/test_parser.py @@ -779,10 +779,20 @@ if exists ["subject"] class DateCommands(SieveTest): + + def test_date_command(self): + self.compilation_ok(b"""require ["date", "relational", "fileinto"]; +if allof(header :is "from" "[email protected]", + date :value "ge" :originalzone "date" "hour" "09", + date :value "lt" :originalzone "date" "hour" "17") +{ fileinto "urgent"; } +""") + def test_currentdate_command(self): self.compilation_ok(b"""require ["date", "relational"]; -if allof ( currentdate :value "ge" "date" "2013-10-23" , currentdate :value "le" "date" "2014-10-12" ) +if allof(currentdate :value "ge" "date" "2013-10-23", + currentdate :value "le" "date" "2014-10-12") { discard; } @@ -791,7 +801,8 @@ if allof ( currentdate :value "ge" "date" "2013-10-23" , currentdate :value "le" def test_currentdate_command_timezone(self): self.compilation_ok(b"""require ["date", "relational"]; -if allof ( currentdate :zone "+0100" :value "ge" "date" "2013-10-23" , currentdate :value "le" "date" "2014-10-12" ) +if allof(currentdate :zone "+0100" :value "ge" "date" "2013-10-23", + currentdate :value "le" "date" "2014-10-12") { discard; } @@ -800,13 +811,22 @@ if allof ( currentdate :zone "+0100" :value "ge" "date" "2013-10-23" , currentda def test_currentdate_norel(self): self.compilation_ok(b"""require ["date"]; -if allof ( - currentdate :zone "+0100" :is "date" "2013-10-23" -) +if allof ( + currentdate :zone "+0100" :is "date" "2013-10-23" +) { discard; }""") + def test_currentdate_extension_not_loaded(self): + self.compilation_ko(b"""require ["date"]; + +if allof ( currentdate :value "ge" "date" "2013-10-23" , currentdate :value "le" "date" "2014-10-12" ) +{ + discard; +} +""") + class VariablesCommands(SieveTest): def test_set_command(self):
Better support of relational and date extensions The current implementation is buggy: the `:count` and `:value` match types (RFC 5231, relational) are accepted without requiring the `relational` extension, the `date` test is not supported at all, and `currentdate` is declared as a control command with an ad-hoc argument list instead of as a test command.
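A minimal check of the intended behaviour through the parser, modelled on the shape of the new tests (assumes the fixed sievelib; `Parser.parse` returns a boolean and records the message in `p.error` on failure):

```python
from sievelib.parser import Parser

script = b"""require ["date", "relational"];

if allof (currentdate :value "ge" "date" "2013-10-23",
          currentdate :value "le" "date" "2014-10-12")
{
    discard;
}
"""

p = Parser()
print(p.parse(script))  # True; drop "relational" from require and it is False
```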
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "sievelib/tests/test_parser.py::DateCommands::test_currentdate_extension_not_loaded", "sievelib/tests/test_parser.py::DateCommands::test_date_command" ]
[ "sievelib/tests/test_parser.py::AdditionalCommands::test_add_command", "sievelib/tests/test_parser.py::AdditionalCommands::test_quota_notification", "sievelib/tests/test_parser.py::ValidEncodings::test_utf8_file", "sievelib/tests/test_parser.py::ValidSyntaxes::test_body_extension", "sievelib/tests/test_parser.py::ValidSyntaxes::test_bracket_comment", "sievelib/tests/test_parser.py::ValidSyntaxes::test_complex_allof_with_not", "sievelib/tests/test_parser.py::ValidSyntaxes::test_explicit_comparator", "sievelib/tests/test_parser.py::ValidSyntaxes::test_fileinto_create", "sievelib/tests/test_parser.py::ValidSyntaxes::test_hash_comment", "sievelib/tests/test_parser.py::ValidSyntaxes::test_imap4flags_extension", "sievelib/tests/test_parser.py::ValidSyntaxes::test_imap4flags_hasflag", "sievelib/tests/test_parser.py::ValidSyntaxes::test_just_one_command", "sievelib/tests/test_parser.py::ValidSyntaxes::test_multiline_string", "sievelib/tests/test_parser.py::ValidSyntaxes::test_multiple_not", "sievelib/tests/test_parser.py::ValidSyntaxes::test_nested_blocks", "sievelib/tests/test_parser.py::ValidSyntaxes::test_non_ordered_args", "sievelib/tests/test_parser.py::ValidSyntaxes::test_reject_extension", "sievelib/tests/test_parser.py::ValidSyntaxes::test_rfc5228_extended", "sievelib/tests/test_parser.py::ValidSyntaxes::test_singletest_testlist", "sievelib/tests/test_parser.py::ValidSyntaxes::test_string_with_bracket_comment", "sievelib/tests/test_parser.py::ValidSyntaxes::test_true_test", "sievelib/tests/test_parser.py::ValidSyntaxes::test_truefalse_testlist", "sievelib/tests/test_parser.py::ValidSyntaxes::test_vacationext_basic", "sievelib/tests/test_parser.py::ValidSyntaxes::test_vacationext_medium", "sievelib/tests/test_parser.py::ValidSyntaxes::test_vacationext_with_limit", "sievelib/tests/test_parser.py::ValidSyntaxes::test_vacationext_with_multiline", "sievelib/tests/test_parser.py::ValidSyntaxes::test_vacationext_with_single_mail_address", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_comma_inside_arguments", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_empty_not", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_empty_string_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_extra_arg", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_misplaced_comma_in_string_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_misplaced_comma_in_tests_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_misplaced_parenthesis", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_missing_semicolon", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_missing_semicolon_in_block", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nested_comments", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_non_ordered_args", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonclosed_block", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonclosed_tests_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonclosed_tests_list2", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonopened_block", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonopened_tests_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_unclosed_string_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_unknown_token", "sievelib/tests/test_parser.py::LanguageRestrictions::test_bad_arg_value", "sievelib/tests/test_parser.py::LanguageRestrictions::test_bad_arg_value2", 
"sievelib/tests/test_parser.py::LanguageRestrictions::test_bad_comparator_value", "sievelib/tests/test_parser.py::LanguageRestrictions::test_exists_get_string_or_list", "sievelib/tests/test_parser.py::LanguageRestrictions::test_fileinto_create_without_fileinto", "sievelib/tests/test_parser.py::LanguageRestrictions::test_fileinto_create_without_mailbox", "sievelib/tests/test_parser.py::LanguageRestrictions::test_misplaced_elsif", "sievelib/tests/test_parser.py::LanguageRestrictions::test_misplaced_elsif2", "sievelib/tests/test_parser.py::LanguageRestrictions::test_misplaced_nested_elsif", "sievelib/tests/test_parser.py::LanguageRestrictions::test_not_included_extension", "sievelib/tests/test_parser.py::LanguageRestrictions::test_test_outside_control", "sievelib/tests/test_parser.py::LanguageRestrictions::test_unexpected_argument", "sievelib/tests/test_parser.py::LanguageRestrictions::test_unknown_control", "sievelib/tests/test_parser.py::DateCommands::test_currentdate_command", "sievelib/tests/test_parser.py::DateCommands::test_currentdate_command_timezone", "sievelib/tests/test_parser.py::DateCommands::test_currentdate_norel", "sievelib/tests/test_parser.py::VariablesCommands::test_set_command", "sievelib/tests/test_parser.py::CopyWithoutSideEffectsTestCase::test_fileinto_with_copy", "sievelib/tests/test_parser.py::CopyWithoutSideEffectsTestCase::test_redirect_with_copy" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-02-25T10:50:21Z"
mit
tonioo__sievelib-82
diff --git a/sievelib/commands.py b/sievelib/commands.py index d26bcb5..c2db5a4 100644 --- a/sievelib/commands.py +++ b/sievelib/commands.py @@ -962,6 +962,22 @@ class CurrentdateCommand(TestCommand): "required": True} ] + def args_as_tuple(self): + """Return arguments as a list.""" + result = ("currentdate", ) + result += ( + ":zone", + self.arguments["zone"].strip('"'), + self.arguments["match-type"], + self.arguments["date-part"].strip('"') + ) + if self.arguments["key-list"].startswith("["): + result = result + tuple( + tools.to_list(self.arguments["key-list"])) + else: + result = result + (self.arguments["key-list"].strip('"'),) + return result + class VacationCommand(ActionCommand): args_definition = [ diff --git a/sievelib/factory.py b/sievelib/factory.py index eb9b32e..8c065aa 100644 --- a/sievelib/factory.py +++ b/sievelib/factory.py @@ -215,6 +215,23 @@ class FiltersSet(object): "stringlist", "[%s]" % (",".join('"%s"' % val for val in c[3:])) ) + elif cname == "currentdate": + cmd = commands.get_command_instance( + "currentdate", ifcontrol, False) + self.require(cmd.extension) + cmd.check_next_arg("tag", c[1]) + cmd.check_next_arg("string", self.__quote_if_necessary(c[2])) + if c[3].startswith(":not"): + comp_tag = c[3].replace("not", "") + negate = True + else: + comp_tag = c[3] + cmd.check_next_arg("tag", comp_tag) + cmd.check_next_arg("string", self.__quote_if_necessary(c[4])) + cmd.check_next_arg( + "stringlist", + "[%s]" % (",".join('"%s"' % val for val in c[5:])) + ) else: # header command fallback if c[1].startswith(':not'): @@ -356,7 +373,8 @@ class FiltersSet(object): commands.SizeCommand, commands.ExistsCommand, commands.BodyCommand, - commands.EnvelopeCommand)): + commands.EnvelopeCommand, + commands.CurrentdateCommand)): args = node.args_as_tuple() if negate: if node.name in ["header", "envelope"]: @@ -367,6 +385,12 @@ class FiltersSet(object): (":not{}".format(args[2][1:]),) + args[3:] ) + elif node.name == "currentdate": + args = ( + args[:3] + + (":not{}".format(args[3][1:]),) + + args[4:] + ) elif node.name == "exists": args = ("not{}".format(args[0]),) + args[1:] negate = False
tonioo/sievelib
a8c63b395ac67830d65670dde026775f1742c6bb
diff --git a/sievelib/tests/test_factory.py b/sievelib/tests/test_factory.py index 42b3964..e2e602d 100644 --- a/sievelib/tests/test_factory.py +++ b/sievelib/tests/test_factory.py @@ -68,6 +68,17 @@ class FactoryTestCase(unittest.TestCase): conditions = self.fs.get_filter_conditions("ruleC") self.assertEqual(orig_conditions, conditions) + orig_conditions = [( + "currentdate", ":zone", "+0100", ":notis", "date", "2019-02-26" + )] + self.fs.addfilter( + "ruleD", + orig_conditions, + [("fileinto", "INBOX")] + ) + conditions = self.fs.get_filter_conditions("ruleD") + self.assertEqual(orig_conditions, conditions) + def test_get_filter_matchtype(self): """Test get_filter_matchtype method.""" self.fs.addfilter( @@ -298,6 +309,21 @@ if anyof (not envelope :is ["From"] ["hello"]) { } """) + def test_add_currentdate_filter(self): + """Add a currentdate filter.""" + self.fs.addfilter( + "test", + [("currentdate", ":zone", "+0100", ":is", "date", "2019-02-26")], + [("fileinto", "INBOX")] + ) + self.assertEqual("{}".format(self.fs), """require ["date", "fileinto"]; + +# Filter: test +if anyof (currentdate :zone "+0100" :is "date" ["2019-02-26"]) { + fileinto "INBOX"; +} +""") + if __name__ == "__main__": unittest.main()
Factory: add support for currentdate test Make the factory module understand the currentdate test.
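A usage sketch mirroring the new factory test (assumes the fixed sievelib; the single-argument `FiltersSet` constructor call relies on the defaults for the remaining parameters):

```python
from sievelib.factory import FiltersSet

fs = FiltersSet("test")
fs.addfilter(
    "rule1",
    [("currentdate", ":zone", "+0100", ":is", "date", "2019-02-26")],
    [("fileinto", "INBOX")],
)
# Prints the generated script: require ["date", "fileinto"]; followed by
# an anyof (currentdate :zone "+0100" :is "date" ["2019-02-26"]) block.
print(fs)
```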
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "sievelib/tests/test_factory.py::FactoryTestCase::test_add_currentdate_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_get_filter_conditions" ]
[ "sievelib/tests/test_factory.py::FactoryTestCase::test_add_body_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_envelope_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_exists_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_exists_filter_with_not", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_filter_unicode", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_header_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_header_filter_with_not", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_notbody_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_notenvelope_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_size_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_disablefilter", "sievelib/tests/test_factory.py::FactoryTestCase::test_get_filter_actions", "sievelib/tests/test_factory.py::FactoryTestCase::test_get_filter_matchtype", "sievelib/tests/test_factory.py::FactoryTestCase::test_remove_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_use_action_with_tag" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-02-28T10:45:28Z"
mit
tonioo__sievelib-84
diff --git a/sievelib/commands.py b/sievelib/commands.py index 5a9bf2f..acb4535 100644 --- a/sievelib/commands.py +++ b/sievelib/commands.py @@ -550,10 +550,6 @@ class RequireCommand(ControlCommand): RequireCommand.loaded_extensions += [ext] -class StopCommand(ControlCommand): - args_definition = [] - - class IfCommand(ControlCommand): accept_children = True @@ -614,6 +610,10 @@ class ActionCommand(Command): return (self.name, ) + tuple(args) +class StopCommand(ActionCommand): + args_definition = [] + + class FileintoCommand(ActionCommand): extension = "fileinto" args_definition = [
tonioo/sievelib
37463faa2019c4856620473231d3ad3a8a57858d
diff --git a/sievelib/tests/test_factory.py b/sievelib/tests/test_factory.py index 1da42aa..f425363 100644 --- a/sievelib/tests/test_factory.py +++ b/sievelib/tests/test_factory.py @@ -111,6 +111,14 @@ class FactoryTestCase(unittest.TestCase): self.assertIn(":copy", actions[0]) self.assertIn("Toto", actions[0]) + self.fs.addfilter( + "ruleY", + [("Subject", ":contains", "aaa")], + [("stop",)] + ) + actions = self.fs.get_filter_actions("ruleY") + self.assertIn("stop", actions[0]) + def test_add_header_filter(self): output = six.StringIO() self.fs.addfilter( diff --git a/sievelib/tests/test_parser.py b/sievelib/tests/test_parser.py index d5fa697..8890eac 100644 --- a/sievelib/tests/test_parser.py +++ b/sievelib/tests/test_parser.py @@ -189,7 +189,7 @@ noreply Your email has been canceled ============================ . - stop (type: control) + stop (type: action) else (type: control) reject (type: action) text: @@ -399,7 +399,7 @@ if (type: control) not (type: test) not (type: test) true (type: test) - stop (type: control) + stop (type: action) """) def test_just_one_command(self):
stop action is wrongly implemented It is currently declared as a control command.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "sievelib/tests/test_factory.py::FactoryTestCase::test_get_filter_actions", "sievelib/tests/test_parser.py::ValidSyntaxes::test_multiline_string", "sievelib/tests/test_parser.py::ValidSyntaxes::test_multiple_not" ]
[ "sievelib/tests/test_factory.py::FactoryTestCase::test_add_body_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_currentdate_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_envelope_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_exists_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_exists_filter_with_not", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_filter_unicode", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_header_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_header_filter_with_not", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_notbody_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_notenvelope_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_size_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_disablefilter", "sievelib/tests/test_factory.py::FactoryTestCase::test_get_filter_conditions", "sievelib/tests/test_factory.py::FactoryTestCase::test_get_filter_matchtype", "sievelib/tests/test_factory.py::FactoryTestCase::test_remove_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_use_action_with_tag", "sievelib/tests/test_parser.py::AdditionalCommands::test_add_command", "sievelib/tests/test_parser.py::AdditionalCommands::test_quota_notification", "sievelib/tests/test_parser.py::ValidEncodings::test_utf8_file", "sievelib/tests/test_parser.py::ValidSyntaxes::test_body_extension", "sievelib/tests/test_parser.py::ValidSyntaxes::test_bracket_comment", "sievelib/tests/test_parser.py::ValidSyntaxes::test_complex_allof_with_not", "sievelib/tests/test_parser.py::ValidSyntaxes::test_explicit_comparator", "sievelib/tests/test_parser.py::ValidSyntaxes::test_fileinto_create", "sievelib/tests/test_parser.py::ValidSyntaxes::test_hash_comment", "sievelib/tests/test_parser.py::ValidSyntaxes::test_imap4flags_extension", "sievelib/tests/test_parser.py::ValidSyntaxes::test_imap4flags_hasflag", "sievelib/tests/test_parser.py::ValidSyntaxes::test_just_one_command", "sievelib/tests/test_parser.py::ValidSyntaxes::test_nested_blocks", "sievelib/tests/test_parser.py::ValidSyntaxes::test_non_ordered_args", "sievelib/tests/test_parser.py::ValidSyntaxes::test_reject_extension", "sievelib/tests/test_parser.py::ValidSyntaxes::test_rfc5228_extended", "sievelib/tests/test_parser.py::ValidSyntaxes::test_singletest_testlist", "sievelib/tests/test_parser.py::ValidSyntaxes::test_string_with_bracket_comment", "sievelib/tests/test_parser.py::ValidSyntaxes::test_true_test", "sievelib/tests/test_parser.py::ValidSyntaxes::test_truefalse_testlist", "sievelib/tests/test_parser.py::ValidSyntaxes::test_vacationext_basic", "sievelib/tests/test_parser.py::ValidSyntaxes::test_vacationext_medium", "sievelib/tests/test_parser.py::ValidSyntaxes::test_vacationext_with_limit", "sievelib/tests/test_parser.py::ValidSyntaxes::test_vacationext_with_multiline", "sievelib/tests/test_parser.py::ValidSyntaxes::test_vacationext_with_single_mail_address", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_comma_inside_arguments", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_empty_not", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_empty_string_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_extra_arg", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_misplaced_comma_in_string_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_misplaced_comma_in_tests_list", 
"sievelib/tests/test_parser.py::InvalidSyntaxes::test_misplaced_parenthesis", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_missing_semicolon", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_missing_semicolon_in_block", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nested_comments", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_non_ordered_args", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonclosed_block", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonclosed_tests_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonclosed_tests_list2", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonopened_block", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonopened_tests_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_unclosed_string_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_unknown_token", "sievelib/tests/test_parser.py::LanguageRestrictions::test_bad_arg_value", "sievelib/tests/test_parser.py::LanguageRestrictions::test_bad_arg_value2", "sievelib/tests/test_parser.py::LanguageRestrictions::test_bad_comparator_value", "sievelib/tests/test_parser.py::LanguageRestrictions::test_exists_get_string_or_list", "sievelib/tests/test_parser.py::LanguageRestrictions::test_fileinto_create_without_fileinto", "sievelib/tests/test_parser.py::LanguageRestrictions::test_fileinto_create_without_mailbox", "sievelib/tests/test_parser.py::LanguageRestrictions::test_misplaced_elsif", "sievelib/tests/test_parser.py::LanguageRestrictions::test_misplaced_elsif2", "sievelib/tests/test_parser.py::LanguageRestrictions::test_misplaced_nested_elsif", "sievelib/tests/test_parser.py::LanguageRestrictions::test_not_included_extension", "sievelib/tests/test_parser.py::LanguageRestrictions::test_test_outside_control", "sievelib/tests/test_parser.py::LanguageRestrictions::test_unexpected_argument", "sievelib/tests/test_parser.py::LanguageRestrictions::test_unknown_control", "sievelib/tests/test_parser.py::DateCommands::test_currentdate_command", "sievelib/tests/test_parser.py::DateCommands::test_currentdate_command_timezone", "sievelib/tests/test_parser.py::DateCommands::test_currentdate_extension_not_loaded", "sievelib/tests/test_parser.py::DateCommands::test_currentdate_norel", "sievelib/tests/test_parser.py::DateCommands::test_date_command", "sievelib/tests/test_parser.py::VariablesCommands::test_set_command", "sievelib/tests/test_parser.py::CopyWithoutSideEffectsTestCase::test_fileinto_with_copy", "sievelib/tests/test_parser.py::CopyWithoutSideEffectsTestCase::test_redirect_with_copy" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2019-03-18T09:11:44Z"
mit
tonioo__sievelib-86
diff --git a/sievelib/commands.py b/sievelib/commands.py
index acb4535..fa133d5 100644
--- a/sievelib/commands.py
+++ b/sievelib/commands.py
@@ -798,7 +798,16 @@ class ExistsCommand(TestCommand):
     ]

     def args_as_tuple(self):
+        """FIXME: depending on how the command was generated (factory
+        or parser), the type of the arguments differs: a string when it
+        comes from the factory, or the normal type when it comes from
+        the parser. This needs to be made consistent!!
+
+        """
         value = self.arguments["header-names"]
+        if isinstance(value, list):
+            value = "[{}]".format(
+                ",".join('"{}"'.format(item) for item in value))
         if not value.startswith("["):
             return ('exists', value.strip('"'))
         return ("exists", ) + tuple(tools.to_list(value))
tonioo/sievelib
df786d65f3279ee3e28d2565f4b843b0939c304e
diff --git a/sievelib/tests/test_factory.py b/sievelib/tests/test_factory.py index f425363..7787e5c 100644 --- a/sievelib/tests/test_factory.py +++ b/sievelib/tests/test_factory.py @@ -5,6 +5,7 @@ import unittest import six from sievelib.factory import FiltersSet +from .. import parser class FactoryTestCase(unittest.TestCase): @@ -91,6 +92,21 @@ class FactoryTestCase(unittest.TestCase): conditions = self.fs.get_filter_conditions("ruleE") self.assertEqual(orig_conditions, conditions) + def test_get_filter_conditions_from_parser_result(self): + res = """require ["fileinto"]; + +# rule:[test] +if anyof (exists ["Subject"]) { + fileinto "INBOX"; +} +""" + p = parser.Parser() + p.parse(res) + fs = FiltersSet("test", '# rule:') + fs.from_parser_result(p) + c = fs.get_filter_conditions('[test]') + self.assertEqual(c, [("exists", "Subject")]) + def test_get_filter_matchtype(self): """Test get_filter_matchtype method.""" self.fs.addfilter(
factory: problem while exporting exists command

When we try to export a previously parsed exists command through the factory module, it fails with the following traceback:

```python
>>> c = fs.get_filter_conditions('[test]')
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/home/matteo/src/webmail/be/qbservice/sievelib/factory.py", line 387, in get_filter_conditions
    args = node.args_as_tuple()
  File "/home/matteo/src/webmail/be/qbservice/sievelib/commands.py", line 802, in args_as_tuple
    if not value.startswith("["):
AttributeError: 'list' object has no attribute 'startswith'
```
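For context, the traceback boils down to `args_as_tuple` assuming `header-names` is always a serialized string, while the parser stores a plain Python list. A minimal standalone sketch of the normalization idea behind the fix above (the helper name is ours for illustration, not sievelib's API):

```python
def normalize_header_names(value):
    """Render a parsed header-names value as a sieve string list.

    The parser yields a Python list (e.g. ["Subject"]), while the factory
    stores the serialized form (e.g. '["Subject"]'); normalizing to the
    string form keeps the downstream startswith("[") check working.
    """
    if isinstance(value, list):
        # Quote each item and join into a sieve-style string list.
        value = "[{}]".format(",".join('"{}"'.format(item) for item in value))
    return value


assert normalize_header_names(["Subject"]) == '["Subject"]'
assert normalize_header_names('"Subject"') == '"Subject"'
```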
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "sievelib/tests/test_factory.py::FactoryTestCase::test_get_filter_conditions_from_parser_result" ]
[ "sievelib/tests/test_factory.py::FactoryTestCase::test_add_body_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_currentdate_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_envelope_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_exists_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_exists_filter_with_not", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_filter_unicode", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_header_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_header_filter_with_not", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_notbody_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_notenvelope_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_add_size_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_disablefilter", "sievelib/tests/test_factory.py::FactoryTestCase::test_get_filter_actions", "sievelib/tests/test_factory.py::FactoryTestCase::test_get_filter_conditions", "sievelib/tests/test_factory.py::FactoryTestCase::test_get_filter_matchtype", "sievelib/tests/test_factory.py::FactoryTestCase::test_remove_filter", "sievelib/tests/test_factory.py::FactoryTestCase::test_use_action_with_tag" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2019-03-21T08:53:34Z"
mit
tonioo__sievelib-97
diff --git a/sievelib/parser.py b/sievelib/parser.py index a92f5f1..219ca94 100755 --- a/sievelib/parser.py +++ b/sievelib/parser.py @@ -142,7 +142,7 @@ class Parser(object): self.__curcommand = None self.__curstringlist = None self.__expected = None - self.__opened_blocks = 0 + self.__expected_brackets = [] RequireCommand.loaded_extensions = [] def __set_expected(self, *args, **kwargs): @@ -153,6 +153,28 @@ class Parser(object): """ self.__expected = args + def __push_expected_bracket(self, ttype, tvalue): + """Append a new expected bracket. + + Next time a bracket is closed, it must match the one provided here. + """ + self.__expected_brackets.append((ttype, tvalue)) + + def __pop_expected_bracket(self, ttype, tvalue): + """Drop the last expected bracket. + + If the given bracket doesn't match the dropped expected bracket, + or if no bracket is expected at all, a ParseError will be raised. + """ + try: + etype, evalue = self.__expected_brackets.pop() + except IndexError: + raise ParseError("unexpected closing bracket %s (none opened)" % + (tvalue,)) + if ttype != etype: + raise ParseError("unexpected closing bracket %s (expected %s)" % + (tvalue, evalue)) + def __up(self, onlyrecord=False): """Return to the current command's parent @@ -251,6 +273,7 @@ class Parser(object): self.__set_expected("string") return True if ttype == "right_bracket": + self.__pop_expected_bracket(ttype, tvalue) self.__curcommand.check_next_arg("stringlist", self.__curstringlist) self.__cstate = self.__arguments return self.__check_command_completion() @@ -275,6 +298,7 @@ class Parser(object): return self.__curcommand.check_next_arg(ttype, tvalue.decode("ascii")) if ttype == "left_bracket": + self.__push_expected_bracket("right_bracket", b'}') self.__cstate = self.__stringlist self.__curstringlist = [] self.__set_expected("string") @@ -314,6 +338,7 @@ class Parser(object): return self.__check_command_completion(testsemicolon=False) if ttype == "left_parenthesis": + self.__push_expected_bracket("right_parenthesis", b')') self.__set_expected("identifier") return True @@ -322,6 +347,7 @@ class Parser(object): return True if ttype == "right_parenthesis": + self.__pop_expected_bracket(ttype, tvalue) self.__up() return True @@ -348,8 +374,8 @@ class Parser(object): """ if self.__cstate is None: if ttype == "right_cbracket": + self.__pop_expected_bracket(ttype, tvalue) self.__up() - self.__opened_blocks -= 1 self.__cstate = None return True @@ -376,7 +402,7 @@ class Parser(object): return True if ttype == "left_cbracket": - self.__opened_blocks += 1 + self.__push_expected_bracket("right_cbracket", b'}') self.__cstate = None return True @@ -438,8 +464,8 @@ class Parser(object): % (tvalue.decode(), text.decode()[self.lexer.pos]) ) raise ParseError(msg) - if self.__opened_blocks: - self.__set_expected("right_cbracket") + if self.__expected_brackets: + self.__set_expected(self.__expected_brackets[-1][0]) if self.__expected is not None: raise ParseError("end of script reached while %s expected" % "|".join(self.__expected))
tonioo/sievelib
2603aa2a24fb32132e40b006a73d2aadc5f66355
diff --git a/sievelib/tests/test_parser.py b/sievelib/tests/test_parser.py index 8890eac..f41cf86 100644 --- a/sievelib/tests/test_parser.py +++ b/sievelib/tests/test_parser.py @@ -589,6 +589,16 @@ if header :is "Sender" "[email protected]" { """) + def test_nonopened_parenthesis(self): + self.compilation_ko(b""" +if header :is "Sender" "[email protected]") { + discard; +} +""") + + def test_nonopened_block2(self): + self.compilation_ko(b"""}""") + def test_unknown_token(self): self.compilation_ko(b""" if header :is "Sender" "Toto" & header :contains "Cc" "Tata" { @@ -599,6 +609,9 @@ if header :is "Sender" "Toto" & header :contains "Cc" "Tata" { def test_empty_string_list(self): self.compilation_ko(b"require [];") + def test_unopened_string_list(self): + self.compilation_ko(b'require "fileinto"];') + def test_unclosed_string_list(self): self.compilation_ko(b'require ["toto", "tata";') @@ -834,7 +847,7 @@ class VariablesCommands(SieveTest): self.compilation_ok(b"""require ["variables"]; set "matchsub" "testsubject"; - + if allof ( header :contains ["Subject"] "${header}" )
Parser does insufficient bracket matching

The following script crashes the parser instead of generating a parse error: ```}```

Upon investigation, the problem seems to be that the parser doesn't always check whether the current context allows closing a bracket (any of `)]}`), and often just assumes we're in the right context when one of them is encountered.

A solution could be for the parser to maintain a stack of open brackets (e.g. a list `['(', '{', '[']`), `push` to it on each opening bracket and, when closing one, `pop` the last bracket and confirm it's the same as the one we're closing, otherwise issuing a parse error.

I shall try to implement such a solution and file it as a PR. But maybe another solution would be preferred? Cf. derula/strainer#15
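A minimal sketch of the stack-based matching proposed above, independent of sievelib's internals (`ParseError` here is a stand-in for the parser's real exception):

```python
class ParseError(Exception):
    """Stand-in for sievelib's parser error."""


OPENERS = {")": "(", "]": "[", "}": "{"}


def check_brackets(script):
    """Raise ParseError on unopened or mismatched closing brackets."""
    stack = []
    for char in script:
        if char in "([{":
            stack.append(char)
        elif char in OPENERS:
            if not stack:
                raise ParseError(f"unexpected closing bracket {char} (none opened)")
            if stack.pop() != OPENERS[char]:
                raise ParseError(f"unexpected closing bracket {char}")
    if stack:
        raise ParseError(f"end of script reached while {stack[-1]} still open")


check_brackets("({[]})")  # balanced: no error
try:
    check_brackets("}")   # the one-character script from this report
except ParseError as exc:
    print(exc)            # unexpected closing bracket } (none opened)
```

The merged fix applies the same idea, but pushes the *expected closing token type* instead of the opening character, so the error message can name exactly what was expected.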
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonopened_block2", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonopened_parenthesis" ]
[ "sievelib/tests/test_parser.py::AdditionalCommands::test_add_command", "sievelib/tests/test_parser.py::AdditionalCommands::test_quota_notification", "sievelib/tests/test_parser.py::ValidEncodings::test_utf8_file", "sievelib/tests/test_parser.py::ValidSyntaxes::test_body_extension", "sievelib/tests/test_parser.py::ValidSyntaxes::test_bracket_comment", "sievelib/tests/test_parser.py::ValidSyntaxes::test_complex_allof_with_not", "sievelib/tests/test_parser.py::ValidSyntaxes::test_explicit_comparator", "sievelib/tests/test_parser.py::ValidSyntaxes::test_fileinto_create", "sievelib/tests/test_parser.py::ValidSyntaxes::test_hash_comment", "sievelib/tests/test_parser.py::ValidSyntaxes::test_imap4flags_extension", "sievelib/tests/test_parser.py::ValidSyntaxes::test_imap4flags_hasflag", "sievelib/tests/test_parser.py::ValidSyntaxes::test_just_one_command", "sievelib/tests/test_parser.py::ValidSyntaxes::test_multiline_string", "sievelib/tests/test_parser.py::ValidSyntaxes::test_multiple_not", "sievelib/tests/test_parser.py::ValidSyntaxes::test_nested_blocks", "sievelib/tests/test_parser.py::ValidSyntaxes::test_non_ordered_args", "sievelib/tests/test_parser.py::ValidSyntaxes::test_reject_extension", "sievelib/tests/test_parser.py::ValidSyntaxes::test_rfc5228_extended", "sievelib/tests/test_parser.py::ValidSyntaxes::test_singletest_testlist", "sievelib/tests/test_parser.py::ValidSyntaxes::test_string_with_bracket_comment", "sievelib/tests/test_parser.py::ValidSyntaxes::test_true_test", "sievelib/tests/test_parser.py::ValidSyntaxes::test_truefalse_testlist", "sievelib/tests/test_parser.py::ValidSyntaxes::test_vacationext_basic", "sievelib/tests/test_parser.py::ValidSyntaxes::test_vacationext_medium", "sievelib/tests/test_parser.py::ValidSyntaxes::test_vacationext_with_limit", "sievelib/tests/test_parser.py::ValidSyntaxes::test_vacationext_with_multiline", "sievelib/tests/test_parser.py::ValidSyntaxes::test_vacationext_with_single_mail_address", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_comma_inside_arguments", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_empty_not", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_empty_string_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_extra_arg", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_misplaced_comma_in_string_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_misplaced_comma_in_tests_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_misplaced_parenthesis", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_missing_semicolon", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_missing_semicolon_in_block", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nested_comments", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_non_ordered_args", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonclosed_block", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonclosed_tests_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonclosed_tests_list2", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonopened_block", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_nonopened_tests_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_unclosed_string_list", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_unknown_token", "sievelib/tests/test_parser.py::InvalidSyntaxes::test_unopened_string_list", "sievelib/tests/test_parser.py::LanguageRestrictions::test_bad_arg_value", 
"sievelib/tests/test_parser.py::LanguageRestrictions::test_bad_arg_value2", "sievelib/tests/test_parser.py::LanguageRestrictions::test_bad_comparator_value", "sievelib/tests/test_parser.py::LanguageRestrictions::test_exists_get_string_or_list", "sievelib/tests/test_parser.py::LanguageRestrictions::test_fileinto_create_without_fileinto", "sievelib/tests/test_parser.py::LanguageRestrictions::test_fileinto_create_without_mailbox", "sievelib/tests/test_parser.py::LanguageRestrictions::test_misplaced_elsif", "sievelib/tests/test_parser.py::LanguageRestrictions::test_misplaced_elsif2", "sievelib/tests/test_parser.py::LanguageRestrictions::test_misplaced_nested_elsif", "sievelib/tests/test_parser.py::LanguageRestrictions::test_not_included_extension", "sievelib/tests/test_parser.py::LanguageRestrictions::test_test_outside_control", "sievelib/tests/test_parser.py::LanguageRestrictions::test_unexpected_argument", "sievelib/tests/test_parser.py::LanguageRestrictions::test_unknown_control", "sievelib/tests/test_parser.py::DateCommands::test_currentdate_command", "sievelib/tests/test_parser.py::DateCommands::test_currentdate_command_timezone", "sievelib/tests/test_parser.py::DateCommands::test_currentdate_extension_not_loaded", "sievelib/tests/test_parser.py::DateCommands::test_currentdate_norel", "sievelib/tests/test_parser.py::DateCommands::test_date_command", "sievelib/tests/test_parser.py::VariablesCommands::test_set_command", "sievelib/tests/test_parser.py::CopyWithoutSideEffectsTestCase::test_fileinto_with_copy", "sievelib/tests/test_parser.py::CopyWithoutSideEffectsTestCase::test_redirect_with_copy" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-06-14T13:50:47Z"
mit
tonybaloney__wily-200
diff --git a/src/wily/__main__.py b/src/wily/__main__.py index 1b4c7ef..617aefe 100644 --- a/src/wily/__main__.py +++ b/src/wily/__main__.py @@ -11,6 +11,7 @@ from wily.archivers import resolve_archiver from wily.cache import exists, get_default_metrics from wily.config import DEFAULT_CONFIG_PATH, DEFAULT_GRID_STYLE from wily.config import load as load_config +from wily.helper import get_style from wily.helper.custom_enums import ReportFormat from wily.lang import _ from wily.operators import resolve_operators @@ -279,6 +280,8 @@ def report( else: new_output = new_output / "wily_report" / "index.html" + style = get_style(console_format) + from wily.commands.report import report logger.debug(f"Running report on {file} for metric {metrics}") @@ -292,7 +295,7 @@ def report( output=new_output, include_message=message, format=ReportFormat[format], - console_format=console_format, + console_format=style, changes_only=changes, ) diff --git a/src/wily/commands/diff.py b/src/wily/commands/diff.py index fd05c91..fc19c6f 100644 --- a/src/wily/commands/diff.py +++ b/src/wily/commands/diff.py @@ -14,7 +14,8 @@ import tabulate from wily import format_date, format_revision, logger from wily.archivers import resolve_archiver from wily.commands.build import run_operator -from wily.config import DEFAULT_GRID_STYLE, DEFAULT_PATH +from wily.config import DEFAULT_PATH +from wily.helper import get_style from wily.operators import ( BAD_COLORS, GOOD_COLORS, @@ -160,9 +161,8 @@ def diff(config, files, metrics, changes_only=True, detail=True, revision=None): descriptions = [metric.description for operator, metric in metrics] headers = ("File", *descriptions) if len(results) > 0: + style = get_style() print( # But it still makes more sense to show the newest at the top, so reverse again - tabulate.tabulate( - headers=headers, tabular_data=results, tablefmt=DEFAULT_GRID_STYLE - ) + tabulate.tabulate(headers=headers, tabular_data=results, tablefmt=style) ) diff --git a/src/wily/commands/index.py b/src/wily/commands/index.py index 8150bdf..e7f1a55 100644 --- a/src/wily/commands/index.py +++ b/src/wily/commands/index.py @@ -6,7 +6,7 @@ Print information about the wily cache and what is in the index. import tabulate from wily import MAX_MESSAGE_WIDTH, format_date, format_revision, logger -from wily.config import DEFAULT_GRID_STYLE +from wily.helper import get_style from wily.state import State @@ -54,8 +54,6 @@ def index(config, include_message=False): headers = ("Revision", "Author", "Message", "Date") else: headers = ("Revision", "Author", "Date") - print( - tabulate.tabulate( - headers=headers, tabular_data=data, tablefmt=DEFAULT_GRID_STYLE - ) - ) + + style = get_style() + print(tabulate.tabulate(headers=headers, tabular_data=data, tablefmt=style)) diff --git a/src/wily/commands/list_metrics.py b/src/wily/commands/list_metrics.py index 6a8ef61..d9b81d1 100644 --- a/src/wily/commands/list_metrics.py +++ b/src/wily/commands/list_metrics.py @@ -5,12 +5,13 @@ TODO : Only show metrics for the operators that the cache has? 
""" import tabulate -from wily.config import DEFAULT_GRID_STYLE +from wily.helper import get_style from wily.operators import ALL_OPERATORS def list_metrics(): """List metrics available.""" + style = get_style() for name, operator in ALL_OPERATORS.items(): print(f"{name} operator:") if len(operator.cls.metrics) > 0: @@ -18,6 +19,6 @@ def list_metrics(): tabulate.tabulate( headers=("Name", "Description", "Type"), tabular_data=operator.cls.metrics, - tablefmt=DEFAULT_GRID_STYLE, + tablefmt=style, ) ) diff --git a/src/wily/commands/rank.py b/src/wily/commands/rank.py index 5600b81..32a3e86 100644 --- a/src/wily/commands/rank.py +++ b/src/wily/commands/rank.py @@ -17,7 +17,8 @@ import tabulate from wily import format_date, format_revision, logger from wily.archivers import resolve_archiver -from wily.config import DEFAULT_GRID_STYLE, DEFAULT_PATH +from wily.config import DEFAULT_PATH +from wily.helper import get_style from wily.operators import resolve_metric_as_tuple from wily.state import State @@ -117,11 +118,8 @@ def rank(config, path, metric, revision_index, limit, threshold, descending): data.append(["Total", total]) headers = ("File", metric.description) - print( - tabulate.tabulate( - headers=headers, tabular_data=data, tablefmt=DEFAULT_GRID_STYLE - ) - ) + style = get_style() + print(tabulate.tabulate(headers=headers, tabular_data=data, tablefmt=style)) if threshold and total < threshold: logger.error( diff --git a/src/wily/helper/__init__.py b/src/wily/helper/__init__.py index a1f80be..7c52235 100644 --- a/src/wily/helper/__init__.py +++ b/src/wily/helper/__init__.py @@ -1,1 +1,12 @@ """Helper package for wily.""" +import sys + +from wily.config import DEFAULT_GRID_STYLE + + +def get_style(style=DEFAULT_GRID_STYLE): + """Select the tablefmt style for tabulate according to what sys.stdout can handle.""" + if style == DEFAULT_GRID_STYLE: + if sys.stdout.encoding.lower() not in ("utf-8", "utf8"): + style = "grid" + return style
tonybaloney/wily
2f59f943d935b0fc4101608783178911dfddbba0
diff --git a/test/unit/test_helper.py b/test/unit/test_helper.py
new file mode 100644
index 0000000..6b29d3a
--- /dev/null
+++ b/test/unit/test_helper.py
@@ -0,0 +1,26 @@
+from io import BytesIO, TextIOWrapper
+from unittest import mock
+
+from wily.config import DEFAULT_GRID_STYLE
+from wily.helper import get_style
+
+
+def test_get_style():
+    output = TextIOWrapper(BytesIO(), encoding="utf-8")
+    with mock.patch("sys.stdout", output):
+        style = get_style()
+    assert style == DEFAULT_GRID_STYLE
+
+
+def test_get_style_charmap():
+    output = TextIOWrapper(BytesIO(), encoding="charmap")
+    with mock.patch("sys.stdout", output):
+        style = get_style()
+    assert style == "grid"
+
+
+def test_get_style_charmap_not_default_grid_style():
+    output = TextIOWrapper(BytesIO(), encoding="charmap")
+    with mock.patch("sys.stdout", output):
+        style = get_style("something_else")
+    assert style == "something_else"
Wily diff output

Hi, I wanted to use wily as an external tool in PyCharm, in order to see the wily diff of a file:

`wily.exe diff foobar.py`

However, this fails and I get this message:

```
...
  File "c:\programdata\anaconda3\envs\foobar\lib\encodings\cp1252.py", line 19, in encode
    return codecs.charmap_encode(input,self.errors,encoding_table)[0]
UnicodeEncodeError: 'charmap' codec can't encode characters in position 0-123: character maps to <undefined>
```

The same happens in cmd when I want to pipe the output to a file:

`wily.exe diff foobar.py > wilydiff.txt`

The output options are not valid for the diff command. Any idea? Thanks
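The traceback points at tabulate's box-drawing table style, whose characters cp1252 cannot encode. The merged fix (above) falls back to the ASCII `grid` style when stdout is not UTF-8. A rough standalone sketch of that check — we assume here that the preferred style is tabulate's `fancy_grid`, and the function name is illustrative:

```python
import sys

import tabulate

FANCY = "fancy_grid"  # box-drawing characters, not representable in cp1252


def pick_table_style(preferred=FANCY):
    """Fall back to plain ASCII 'grid' when stdout can't encode box drawing."""
    encoding = (sys.stdout.encoding or "").lower()
    if preferred == FANCY and encoding not in ("utf-8", "utf8"):
        return "grid"
    return preferred


print(
    tabulate.tabulate(
        [["foobar.py", 12]],
        headers=("File", "Complexity"),
        tablefmt=pick_table_style(),
    )
)
```

With that check in place, piping the output to a file degrades to ASCII instead of raising UnicodeEncodeError.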
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/unit/test_helper.py::test_get_style", "test/unit/test_helper.py::test_get_style_charmap", "test/unit/test_helper.py::test_get_style_charmap_not_default_grid_style" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-07-15T19:16:04Z"
apache-2.0
tonybaloney__wily-201
diff --git a/src/wily/__main__.py b/src/wily/__main__.py index 067f643..377c7c8 100644 --- a/src/wily/__main__.py +++ b/src/wily/__main__.py @@ -154,7 +154,13 @@ def build(ctx, max_revisions, targets, operators, archiver): @click.option( "-m", "--message/--no-message", default=False, help=_("Include revision message") ) -def index(ctx, message): [email protected]( + "-w", + "--wrap/--no-wrap", + default=True, + help=_("Wrap index text to fit in terminal"), +) +def index(ctx, message, wrap): """Show the history archive in the .wily/ folder.""" config = ctx.obj["CONFIG"] @@ -163,7 +169,7 @@ def index(ctx, message): from wily.commands.index import index - index(config=config, include_message=message) + index(config=config, include_message=message, wrap=wrap) @cli.command( @@ -206,8 +212,14 @@ def index(ctx, message): help=_("Return a non-zero exit code under the specified threshold"), type=click.INT, ) [email protected]( + "-w", + "--wrap/--no-wrap", + default=True, + help=_("Wrap rank text to fit in terminal"), +) @click.pass_context -def rank(ctx, path, metric, revision, limit, desc, threshold): +def rank(ctx, path, metric, revision, limit, desc, threshold, wrap): """Rank files, methods and functions in order of any metrics, e.g. complexity.""" config = ctx.obj["CONFIG"] @@ -225,6 +237,7 @@ def rank(ctx, path, metric, revision, limit, desc, threshold): limit=limit, threshold=threshold, descending=desc, + wrap=wrap, ) @@ -260,9 +273,15 @@ def rank(ctx, path, metric, revision, limit, desc, threshold): default=False, help=_("Only show revisions that have changes"), ) [email protected]( + "-w", + "--wrap/--no-wrap", + default=True, + help=_("Wrap report text to fit in terminal"), +) @click.pass_context def report( - ctx, file, metrics, number, message, format, console_format, output, changes + ctx, file, metrics, number, message, format, console_format, output, changes, wrap ): """Show metrics for a given file.""" config = ctx.obj["CONFIG"] @@ -297,6 +316,7 @@ def report( format=ReportFormat[format], console_format=style, changes_only=changes, + wrap=wrap, ) @@ -322,8 +342,14 @@ def report( @click.option( "-r", "--revision", help=_("Compare against specific revision"), type=click.STRING ) [email protected]( + "-w", + "--wrap/--no-wrap", + default=True, + help=_("Wrap diff text to fit in terminal"), +) @click.pass_context -def diff(ctx, files, metrics, all, detail, revision): +def diff(ctx, files, metrics, all, detail, revision, wrap): """Show the differences in metrics for each file.""" config = ctx.obj["CONFIG"] @@ -347,6 +373,7 @@ def diff(ctx, files, metrics, all, detail, revision): changes_only=not all, detail=detail, revision=revision, + wrap=wrap, ) @@ -432,8 +459,14 @@ def clean(ctx, yes): @cli.command("list-metrics", help=_("""List the available metrics.""")) [email protected]( + "-w", + "--wrap/--no-wrap", + default=True, + help=_("Wrap metrics text to fit in terminal"), +) @click.pass_context -def list_metrics(ctx): +def list_metrics(ctx, wrap): """List the available metrics.""" config = ctx.obj["CONFIG"] @@ -442,7 +475,7 @@ def list_metrics(ctx): from wily.commands.list_metrics import list_metrics - list_metrics() + list_metrics(wrap) @cli.command("setup", help=_("""Run a guided setup to build the wily cache.""")) diff --git a/src/wily/commands/diff.py b/src/wily/commands/diff.py index fc19c6f..ec0768b 100644 --- a/src/wily/commands/diff.py +++ b/src/wily/commands/diff.py @@ -15,7 +15,7 @@ from wily import format_date, format_revision, logger from wily.archivers import 
resolve_archiver from wily.commands.build import run_operator from wily.config import DEFAULT_PATH -from wily.helper import get_style +from wily.helper import get_maxcolwidth, get_style from wily.operators import ( BAD_COLORS, GOOD_COLORS, @@ -27,7 +27,9 @@ from wily.operators import ( from wily.state import State -def diff(config, files, metrics, changes_only=True, detail=True, revision=None): +def diff( + config, files, metrics, changes_only=True, detail=True, revision=None, wrap=False +): """ Show the differences in metrics for each of the files. @@ -161,8 +163,15 @@ def diff(config, files, metrics, changes_only=True, detail=True, revision=None): descriptions = [metric.description for operator, metric in metrics] headers = ("File", *descriptions) if len(results) > 0: + maxcolwidth = get_maxcolwidth(headers, wrap) style = get_style() print( # But it still makes more sense to show the newest at the top, so reverse again - tabulate.tabulate(headers=headers, tabular_data=results, tablefmt=style) + tabulate.tabulate( + headers=headers, + tabular_data=results, + tablefmt=style, + maxcolwidths=maxcolwidth, + maxheadercolwidths=maxcolwidth, + ) ) diff --git a/src/wily/commands/index.py b/src/wily/commands/index.py index e7f1a55..d8bebc9 100644 --- a/src/wily/commands/index.py +++ b/src/wily/commands/index.py @@ -6,11 +6,11 @@ Print information about the wily cache and what is in the index. import tabulate from wily import MAX_MESSAGE_WIDTH, format_date, format_revision, logger -from wily.helper import get_style +from wily.helper import get_maxcolwidth, get_style from wily.state import State -def index(config, include_message=False): +def index(config, include_message=False, wrap=False): """ Show information about the cache and runtime. @@ -54,6 +54,14 @@ def index(config, include_message=False): headers = ("Revision", "Author", "Message", "Date") else: headers = ("Revision", "Author", "Date") - + maxcolwidth = get_maxcolwidth(headers, wrap) style = get_style() - print(tabulate.tabulate(headers=headers, tabular_data=data, tablefmt=style)) + print( + tabulate.tabulate( + headers=headers, + tabular_data=data, + tablefmt=style, + maxcolwidths=maxcolwidth, + maxheadercolwidths=maxcolwidth, + ) + ) diff --git a/src/wily/commands/list_metrics.py b/src/wily/commands/list_metrics.py index d9b81d1..572b0ee 100644 --- a/src/wily/commands/list_metrics.py +++ b/src/wily/commands/list_metrics.py @@ -5,20 +5,24 @@ TODO : Only show metrics for the operators that the cache has? 
""" import tabulate -from wily.helper import get_style +from wily.helper import get_maxcolwidth, get_style from wily.operators import ALL_OPERATORS -def list_metrics(): +def list_metrics(wrap): """List metrics available.""" + headers = ("Name", "Description", "Type", "Measure", "Aggregate") + maxcolwidth = get_maxcolwidth(headers, wrap) style = get_style() for name, operator in ALL_OPERATORS.items(): print(f"{name} operator:") if len(operator.cls.metrics) > 0: print( tabulate.tabulate( - headers=("Name", "Description", "Type"), + headers=headers, tabular_data=operator.cls.metrics, tablefmt=style, + maxcolwidths=maxcolwidth, + maxheadercolwidths=maxcolwidth, ) ) diff --git a/src/wily/commands/rank.py b/src/wily/commands/rank.py index 29c63cf..8f11710 100644 --- a/src/wily/commands/rank.py +++ b/src/wily/commands/rank.py @@ -18,12 +18,12 @@ import tabulate from wily import format_date, format_revision, logger from wily.archivers import resolve_archiver from wily.config import DEFAULT_PATH -from wily.helper import get_style +from wily.helper import get_maxcolwidth, get_style from wily.operators import resolve_metric_as_tuple from wily.state import State -def rank(config, path, metric, revision_index, limit, threshold, descending): +def rank(config, path, metric, revision_index, limit, threshold, descending, wrap): """ Rank command ordering files, methods or functions using metrics. @@ -121,8 +121,17 @@ def rank(config, path, metric, revision_index, limit, threshold, descending): data.append(["Total", total]) headers = ("File", metric.description) + maxcolwidth = get_maxcolwidth(headers, wrap) style = get_style() - print(tabulate.tabulate(headers=headers, tabular_data=data, tablefmt=style)) + print( + tabulate.tabulate( + headers=headers, + tabular_data=data, + tablefmt=style, + maxcolwidths=maxcolwidth, + maxheadercolwidths=maxcolwidth, + ) + ) if threshold and total < threshold: logger.error( diff --git a/src/wily/commands/report.py b/src/wily/commands/report.py index 2dd0bfa..50a99ea 100644 --- a/src/wily/commands/report.py +++ b/src/wily/commands/report.py @@ -11,6 +11,7 @@ from string import Template import tabulate from wily import MAX_MESSAGE_WIDTH, format_date, format_revision, logger +from wily.helper import get_maxcolwidth from wily.helper.custom_enums import ReportFormat from wily.lang import _ from wily.operators import MetricType, resolve_metric_as_tuple @@ -31,6 +32,7 @@ def report( format=ReportFormat.CONSOLE, console_format=None, changes_only=False, + wrap=False, ): """ Show metrics for a given file. 
@@ -211,8 +213,13 @@ def report( logger.info(f"wily report was saved to {report_path}") else: + maxcolwidth = get_maxcolwidth(headers, wrap) print( tabulate.tabulate( - headers=headers, tabular_data=data[::-1], tablefmt=console_format + headers=headers, + tabular_data=data[::-1], + tablefmt=console_format, + maxcolwidths=maxcolwidth, + maxheadercolwidths=maxcolwidth, ) ) diff --git a/src/wily/helper/__init__.py b/src/wily/helper/__init__.py index 7c52235..d8c8347 100644 --- a/src/wily/helper/__init__.py +++ b/src/wily/helper/__init__.py @@ -1,9 +1,26 @@ """Helper package for wily.""" +import shutil import sys from wily.config import DEFAULT_GRID_STYLE +def get_maxcolwidth(headers, wrap=True): + """Calculate the maximum column width for a given terminal width.""" + if not wrap: + return + width = shutil.get_terminal_size()[0] + columns = len(headers) + if width < 80: + padding = columns + 2 + elif width < 120: + padding = columns - 2 + else: + padding = columns - 4 + maxcolwidth = (width // columns) - padding + return max(maxcolwidth, 1) + + def get_style(style=DEFAULT_GRID_STYLE): """Select the tablefmt style for tabulate according to what sys.stdout can handle.""" if style == DEFAULT_GRID_STYLE:
tonybaloney/wily
b0411ae861b8adc39422328abc6b159e64381715
diff --git a/test/integration/test_diff.py b/test/integration/test_diff.py index 102d31a..2d1760d 100644 --- a/test/integration/test_diff.py +++ b/test/integration/test_diff.py @@ -45,6 +45,18 @@ def test_diff_output_all(builddir): assert "test.py" in result.stdout +def test_diff_output_all_wrapped(builddir): + """Test the diff feature with wrapping""" + runner = CliRunner() + result = runner.invoke( + main.cli, + ["--debug", "--path", builddir, "diff", _path, "--all", "--wrap"], + catch_exceptions=False, + ) + assert result.exit_code == 0, result.stdout + assert "test.py" in result.stdout + + def test_diff_output_bad_path(builddir): """Test the diff feature with no changes""" runner = CliRunner() diff --git a/test/integration/test_index.py b/test/integration/test_index.py index 2d9cad8..1146565 100644 --- a/test/integration/test_index.py +++ b/test/integration/test_index.py @@ -34,3 +34,18 @@ def test_index_with_messages(builddir): assert "add line" in result.stdout assert "remove line" in result.stdout assert result.exit_code == 0, result.stdout + + +def test_index_with_messages_wrapped(builddir): + """ + Test that index works with a build with git commit messages and wrapping + """ + runner = CliRunner() + result = runner.invoke( + main.cli, ["--path", builddir, "index", "--message", "--wrap"] + ) + assert result.stdout.count("An author") == 3 + assert "basic test" in result.stdout + assert "add line" in result.stdout + assert "remove line" in result.stdout + assert result.exit_code == 0, result.stdout diff --git a/test/integration/test_list_metrics.py b/test/integration/test_list_metrics.py new file mode 100644 index 0000000..660b3ab --- /dev/null +++ b/test/integration/test_list_metrics.py @@ -0,0 +1,32 @@ +from click.testing import CliRunner + +import wily.__main__ as main + + +def test_list_metrics(builddir): + """ + Test that list-metrics works and is ordered + """ + runner = CliRunner() + result = runner.invoke(main.cli, ["list-metrics"]) + assert result.stdout.count("operator") == 4 + assert "cyclomatic" in result.stdout + assert "maintainability" in result.stdout + assert "raw" in result.stdout + assert "halstead" in result.stdout + # Test ordering + i = result.stdout.index + assert i("cyclomatic") < i("maintainability") < i("raw") < i("halstead") + + +def test_list_metrics_wrapped(builddir): + """ + Test that list-metrics works with wrapping + """ + runner = CliRunner() + result = runner.invoke(main.cli, ["list-metrics", "--wrap"]) + assert result.stdout.count("operator") == 4 + assert "cyclomatic" in result.stdout + assert "maintainability" in result.stdout + assert "raw" in result.stdout + assert "halstead" in result.stdout diff --git a/test/integration/test_rank.py b/test/integration/test_rank.py index fe62e13..5e852df 100644 --- a/test/integration/test_rank.py +++ b/test/integration/test_rank.py @@ -18,6 +18,15 @@ def test_rank_single_file_default_metric(builddir): assert result.exit_code == 0, result.stdout +def test_rank_single_file_default_metric_wrapped(builddir): + """Test the rank feature with default metric and wrapping""" + runner = CliRunner() + result = runner.invoke( + main.cli, ["--path", builddir, "rank", "--wrap", "src/test.py"] + ) + assert result.exit_code == 0, result.stdout + + def test_rank_directory_default_metric(builddir): """Test the rank feature with default (AimLow) metric on a directory""" runner = CliRunner() diff --git a/test/integration/test_report.py b/test/integration/test_report.py index c5fa049..082ca4d 100644 --- 
a/test/integration/test_report.py +++ b/test/integration/test_report.py @@ -139,6 +139,18 @@ def test_report_high_metric(builddir): assert "Not found" not in result.stdout +def test_report_wrapped(builddir): + """ + Test that report works with wrapping + """ + runner = CliRunner() + result = runner.invoke( + main.cli, ["--path", builddir, "report", "--wrap", _path, "raw.comments"] + ) + assert result.exit_code == 0, result.stdout + assert "Not found" not in result.stdout + + def test_report_short_metric(builddir): """ Test that report works with a build on shorthand metric diff --git a/test/unit/test_helper.py b/test/unit/test_helper.py index 6b29d3a..8e64f55 100644 --- a/test/unit/test_helper.py +++ b/test/unit/test_helper.py @@ -1,8 +1,133 @@ from io import BytesIO, TextIOWrapper from unittest import mock +import tabulate + from wily.config import DEFAULT_GRID_STYLE -from wily.helper import get_style +from wily.helper import get_maxcolwidth, get_style + +SHORT_DATA = [list("abcdefgh"), list("abcdefgh")] + +MEDIUM_DATA = [["medium_data"] * 2, ["medium_data"] * 2] + +LONG_DATA = [["long_data"] * 8, ["long_data"] * 8] + +HUGE_DATA = [["huge_data"] * 18, ["huge_data"] * 18] + +LONG_LINE_MEDIUM_DATA = [ + ["long_line_for_some_medium_data"] * 2, + ["long_line_for_some_medium_data"] * 2, +] + + +def test_get_maxcolwidth_no_wrap(): + result = get_maxcolwidth([], False) + assert result is None + + +def test_get_maxcolwidth_wrap_short(): + for width in range(35, 100): + mock_get_terminal_size = mock.Mock(return_value=(width, 24)) + mock_shutil = mock.Mock(get_terminal_size=mock_get_terminal_size) + + with mock.patch("wily.helper.shutil", mock_shutil): + result = get_maxcolwidth(SHORT_DATA[0], True) + as_table = tabulate.tabulate( + tabular_data=SHORT_DATA, + tablefmt="grid", + maxcolwidths=result, + maxheadercolwidths=result, + ) + + line = as_table.splitlines()[0] + assert len(line) < width + assert len(line) >= width / 3 + + +def test_get_maxcolwidth_wrap_medium(): + for width in range(35, 100): + mock_get_terminal_size = mock.Mock(return_value=(width, 24)) + mock_shutil = mock.Mock(get_terminal_size=mock_get_terminal_size) + + with mock.patch("wily.helper.shutil", mock_shutil): + result = get_maxcolwidth(MEDIUM_DATA[0], True) + as_table = tabulate.tabulate( + tabular_data=MEDIUM_DATA, + tablefmt="grid", + maxcolwidths=result, + maxheadercolwidths=result, + ) + + line = as_table.splitlines()[0] + print(line) + print(width, len(line)) + assert len(line) < width + if width < 85: + assert len(line) >= width / 3 + + +def test_get_maxcolwidth_wrap_long_line_medium(): + for width in range(35, 100): + mock_get_terminal_size = mock.Mock(return_value=(width, 24)) + mock_shutil = mock.Mock(get_terminal_size=mock_get_terminal_size) + + with mock.patch("wily.helper.shutil", mock_shutil): + result = get_maxcolwidth(LONG_LINE_MEDIUM_DATA[0], True) + as_table = tabulate.tabulate( + tabular_data=LONG_LINE_MEDIUM_DATA, + tablefmt="grid", + maxcolwidths=result, + maxheadercolwidths=result, + ) + + line = as_table.splitlines()[0] + print(line) + print(width, len(line)) + assert len(line) < width + if width < 85: + assert len(line) >= width / 3 + + +def test_get_maxcolwidth_wrap_long(): + for width in range(35, 290): + mock_get_terminal_size = mock.Mock(return_value=(width, 24)) + mock_shutil = mock.Mock(get_terminal_size=mock_get_terminal_size) + + with mock.patch("wily.helper.shutil", mock_shutil): + result = get_maxcolwidth(LONG_DATA[0], True) + as_table = tabulate.tabulate( + tabular_data=LONG_DATA, + 
tablefmt="grid", + maxcolwidths=result, + maxheadercolwidths=result, + ) + + line = as_table.splitlines()[0] + assert len(line) < width + if width < 290: + assert len(line) >= width / 3 + + +def test_get_maxcolwidth_wrap_huge(): + for width in range(75, 450): + mock_get_terminal_size = mock.Mock(return_value=(width, 24)) + mock_shutil = mock.Mock(get_terminal_size=mock_get_terminal_size) + + with mock.patch("wily.helper.shutil", mock_shutil): + result = get_maxcolwidth(HUGE_DATA[0], True) + as_table = tabulate.tabulate( + tabular_data=HUGE_DATA, + tablefmt="grid", + maxcolwidths=result, + maxheadercolwidths=result, + ) + + line = as_table.splitlines()[0] + assert len(line) < width + if width < 220: + assert len(line) >= width / 3 + else: + assert len(line) >= width / 4 def test_get_style():
running wily with more than 5 metrics causes problems with printing

When running wily with multiple metrics, the printed columns overlap.
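The fix above sizes table columns from the terminal and lets tabulate wrap cell contents via `maxcolwidths`/`maxheadercolwidths` (available since tabulate 0.9). A simplified sketch of the idea — the per-column budget below is illustrative, not wily's exact padding formula:

```python
import shutil

import tabulate  # tabulate >= 0.9 for maxcolwidths/maxheadercolwidths


def max_col_width(num_columns, wrap=True):
    """Give each column an even share of the terminal width."""
    if not wrap:
        return None  # let tabulate lay out columns freely
    terminal_width = shutil.get_terminal_size()[0]
    # Reserve a few characters per column for the grid borders.
    return max(terminal_width // num_columns - 3, 1)


headers = ("File", "Lines of Code", "Cyclomatic Complexity", "Maintainability")
rows = [("src/some/long/path/module.py", 1200, 87, "B (14.2)")]
width = max_col_width(len(headers))
print(
    tabulate.tabulate(
        rows,
        headers=headers,
        tablefmt="grid",
        maxcolwidths=width,
        maxheadercolwidths=width,
    )
)
```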
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/integration/test_diff.py::test_diff_no_cache", "test/integration/test_diff.py::test_diff_no_path", "test/integration/test_diff.py::test_diff_output", "test/integration/test_diff.py::test_diff_output_all", "test/integration/test_diff.py::test_diff_output_all_wrapped", "test/integration/test_diff.py::test_diff_output_bad_path", "test/integration/test_diff.py::test_diff_output_remove_all", "test/integration/test_diff.py::test_diff_output_more_complex", "test/integration/test_diff.py::test_diff_output_less_complex", "test/integration/test_diff.py::test_diff_output_loc", "test/integration/test_diff.py::test_diff_output_loc_and_revision", "test/integration/test_diff.py::test_diff_output_rank", "test/integration/test_index.py::test_index_no_cache", "test/integration/test_index.py::test_index", "test/integration/test_index.py::test_index_with_messages", "test/integration/test_index.py::test_index_with_messages_wrapped", "test/integration/test_list_metrics.py::test_list_metrics", "test/integration/test_list_metrics.py::test_list_metrics_wrapped", "test/integration/test_rank.py::test_rank_no_cache", "test/integration/test_rank.py::test_rank_single_file_default_metric", "test/integration/test_rank.py::test_rank_single_file_default_metric_wrapped", "test/integration/test_rank.py::test_rank_directory_default_metric", "test/integration/test_rank.py::test_rank_directory_default_metric_no_path", "test/integration/test_rank.py::test_rank_directory_default_metric_master", "test/integration/test_rank.py::test_rank_directory_default_invalid_revision", "test/integration/test_rank.py::test_rank_directory_default_unindexed_revision", "test/integration/test_rank.py::test_rank_single_file_informational", "test/integration/test_rank.py::test_rank_directory_custom_metric", "test/integration/test_rank.py::test_rank_directory_no_path_target", "test/integration/test_rank.py::test_rank_directory_limit", "test/integration/test_rank.py::test_rank_directory_desc", "test/integration/test_rank.py::test_rank_directory_invalid_key", "test/integration/test_rank.py::test_rank_directory_asc", "test/integration/test_rank.py::test_rank_total_above_threshold", "test/integration/test_rank.py::test_rank_total_below_threshold", "test/integration/test_report.py::test_report_no_cache", "test/integration/test_report.py::test_report", "test/integration/test_report.py::test_report_granular", "test/integration/test_report.py::test_report_not_found", "test/integration/test_report.py::test_report_default_metrics", "test/integration/test_report.py::test_report_path", "test/integration/test_report.py::test_report_with_message", "test/integration/test_report.py::test_report_with_message_and_n", "test/integration/test_report.py::test_report_changes_only", "test/integration/test_report.py::test_report_high_metric", "test/integration/test_report.py::test_report_wrapped", "test/integration/test_report.py::test_report_short_metric", "test/integration/test_report.py::test_report_low_metric", "test/integration/test_report.py::test_report_html_format", "test/integration/test_report.py::test_report_html_format_target_folder", "test/integration/test_report.py::test_report_html_format_target_file", "test/integration/test_report.py::test_report_console_format", "test/integration/test_report.py::test_report_not_existing_format", "test/unit/test_helper.py::test_get_maxcolwidth_no_wrap", "test/unit/test_helper.py::test_get_maxcolwidth_wrap_short", "test/unit/test_helper.py::test_get_maxcolwidth_wrap_medium", 
"test/unit/test_helper.py::test_get_maxcolwidth_wrap_long_line_medium", "test/unit/test_helper.py::test_get_maxcolwidth_wrap_long", "test/unit/test_helper.py::test_get_maxcolwidth_wrap_huge", "test/unit/test_helper.py::test_get_style", "test/unit/test_helper.py::test_get_style_charmap", "test/unit/test_helper.py::test_get_style_charmap_not_default_grid_style" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-07-16T15:15:03Z"
apache-2.0
tonybaloney__wily-209
diff --git a/docs/source/commands/graph.rst b/docs/source/commands/graph.rst index a0cc9dd..70bdbfb 100644 --- a/docs/source/commands/graph.rst +++ b/docs/source/commands/graph.rst @@ -6,11 +6,11 @@ The graph command generates HTML graphs for metrics, trends and data in the wily Examples -------- -``wily graph`` will take 1 or 2 metrics as the 2nd and 3rd arguments. The first metric will be the Y-axis and the 3rd metric (if provided) will control the size of the bubble. +``wily graph`` will take 1 or 2 comma-separated metrics as the -m option. The first metric will be the Y-axis and the 2nd metric (if provided) will control the size of the bubble. .. code-block:: none - $ wily graph example.py loc + $ wily graph example.py -m loc .. image:: ../_static/single_metric_graph.png :align: center @@ -19,7 +19,7 @@ You can provide a second metric which will be used to control the size of the bu .. code-block:: none - $ wily graph example.py loc complexity + $ wily graph example.py loc,complexity .. image:: ../_static/two_metric_graph.png :align: center @@ -28,7 +28,7 @@ The x-axis will be the historic revisions (typically git commits) on a scale of .. code-block:: none - $ wily graph example.py loc complexity --x-axis sloc + $ wily graph example.py -m loc,complexity --x-axis sloc .. image:: ../_static/custom_x_axis_graph.png :align: center @@ -56,7 +56,15 @@ To save the output to a specific HTML file and not open it, provide the ``-o`` f .. code-block:: none - $ wily report example.py loc -o example.html + $ wily report example.py -m loc -o example.html + +By default, ``wily graph`` will create an HTML file containing all the JS necessary to render the graph. +To create a standalone plotly.min.js file in the same directory as the HTML file instead, pass the ``--shared-js`Β΄ option. +To point the HTML file to a CDN hosted plotly.min.js instead, pass the ``--cdn-js`Β΄ option. + +.. code-block:: none + + $ wily report example.py -m loc --shared=js Command Line Usage diff --git a/src/wily/__main__.py b/src/wily/__main__.py index 1c3b0cc..4489ae5 100644 --- a/src/wily/__main__.py +++ b/src/wily/__main__.py @@ -397,6 +397,10 @@ def diff(ctx, files, metrics, all, detail, revision, wrap): Graph test.py against raw.loc and raw.sloc on the x-axis $ wily graph src/test.py -m raw.loc --x-axis raw.sloc + + Graph test.py against raw.loc creating a standalone plotly.min.js file + + $ wily graph src/test.py -m raw.loc --shared-js """ ) ) @@ -422,14 +426,34 @@ def diff(ctx, files, metrics, all, detail, revision, wrap): default=False, help=_("Aggregate if path is directory"), ) [email protected]( + "--shared-js/--no-shared-js", + default=False, + type=click.BOOL, + help=_("Create standalone plotly.min.js in the graph directory."), +) [email protected]( + "--cdn-js/--no-cdn-js", + default=False, + type=click.BOOL, + help=_("Point to a CDN hosted plotly.min.js."), +) @click.pass_context -def graph(ctx, path, metrics, output, x_axis, changes, aggregate): +def graph(ctx, path, metrics, output, x_axis, changes, aggregate, shared_js, cdn_js): """Output report to specified HTML path, e.g. 
reports/out.html.""" config = ctx.obj["CONFIG"] if not exists(config): handle_no_cache(ctx) + # Embed plotly.min.js in the HTML file by default + plotlyjs = True + if shared_js: + plotlyjs = "directory" + # CDN takes precedence over directory + if cdn_js: + plotlyjs = "cdn" + from wily.commands.graph import graph logger.debug("Running report on %s for metrics %s", path, metrics) @@ -441,6 +465,7 @@ def graph(ctx, path, metrics, output, x_axis, changes, aggregate): x_axis=x_axis, changes=changes, aggregate=aggregate, + plotlyjs=plotlyjs, ) diff --git a/src/wily/commands/graph.py b/src/wily/commands/graph.py index 43bbdbd..b4dc99d 100644 --- a/src/wily/commands/graph.py +++ b/src/wily/commands/graph.py @@ -5,7 +5,7 @@ Draw graph in HTML for a specific metric. """ from pathlib import Path -from typing import Optional, Tuple +from typing import Optional, Tuple, Union import plotly.graph_objs as go import plotly.offline @@ -38,6 +38,7 @@ def graph( changes: bool = True, text: bool = False, aggregate: bool = False, + plotlyjs: Union[bool, str] = True, ) -> None: """ Graph information about the cache and runtime. @@ -50,6 +51,7 @@ def graph( :param changes: Only graph changes. :param text: Show commit message inline in graph. :param aggregate: Aggregate values for graph. + :param plotlyjs: How to include plotly.min.js. """ logger.debug("Running graph command") @@ -169,4 +171,5 @@ def graph( }, auto_open=auto_open, filename=filename, + include_plotlyjs=plotlyjs, # type: ignore )
tonybaloney/wily
ec495118115ca36706fc3f4e6a56842559fb1da1
diff --git a/test/integration/test_graph.py b/test/integration/test_graph.py
index 7596f1b..78b576a 100644
--- a/test/integration/test_graph.py
+++ b/test/integration/test_graph.py
@@ -45,6 +45,36 @@ def test_graph(builddir):
     assert result.exit_code == 0, result.stdout


+def test_graph_shared_js(builddir):
+    """Test the graph feature with --shared-js option"""
+    runner = CliRunner()
+    with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
+        result = runner.invoke(
+            main.cli,
+            [
+                "--path",
+                builddir,
+                "graph",
+                _path,
+                "-m",
+                "raw.loc",
+                "--shared-js",
+            ],
+        )
+    assert result.exit_code == 0, result.stdout
+
+
+def test_graph_plotlyjs_cdn_js(builddir):
+    """Test the graph feature with --cdn_js option"""
+    runner = CliRunner()
+    with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
+        result = runner.invoke(
+            main.cli,
+            ["--path", builddir, "graph", _path, "-m", "raw.loc", " --cdn_js"],
+        )
+    assert result.exit_code == 0, result.stdout
+
+
 def test_graph_all(builddir):
     """Test the graph feature"""
     runner = CliRunner()
Allow standalone plotly.min.js when creating graphs

When creating graphs, wily currently inlines the contents of `plotly.min.js` in HTML files, making their size around 3.4MB each. When [creating a lot of graphs](https://gist.github.com/devdanzin/513dfb256686a5d33f727f8247a87184), this quickly adds up to a lot of space. For example, bulk creating graphs for every metric for every file that ever existed in wily's repository takes around 5GB in about 1500 HTML files.

Plotly has an option to create `plotly.min.js` once, in the same directory, and reference it from the HTML file. It's enabled by calling `plotly.offline.plot` with `include_plotlyjs="directory"`. It reduces the size of the same 1500 graphs from 5GB to under 100MB.

I'm not sure adding this feature would be in scope for wily, but I have it working locally and could contribute a PR if it's desirable. It would also add a new CLI option for graph to enable this behavior.
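For reference, the plotly knob involved is `include_plotlyjs`: `True` inlines the ~3MB script into every HTML file, `"directory"` writes one shared `plotly.min.js` next to the output, and `"cdn"` points at a hosted copy. A small self-contained sketch (the figure data is arbitrary):

```python
import plotly.graph_objs as go
import plotly.offline

figure = {"data": [go.Scatter(x=[1, 2, 3], y=[10, 12, 9], name="raw.loc")]}

# "directory" creates plotly.min.js once beside the HTML file instead of
# embedding it, which is what shrinks 1500 reports from ~5GB to <100MB.
plotly.offline.plot(
    figure,
    filename="wily_graph.html",
    auto_open=False,
    include_plotlyjs="directory",
)
```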
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/integration/test_graph.py::test_graph_shared_js" ]
[ "test/integration/test_graph.py::test_graph_no_cache", "test/integration/test_graph.py::test_graph_no_path", "test/integration/test_graph.py::test_graph", "test/integration/test_graph.py::test_graph_plotlyjs_cdn_js", "test/integration/test_graph.py::test_graph_all", "test/integration/test_graph.py::test_graph_all_with_shorthand_metric", "test/integration/test_graph.py::test_graph_changes", "test/integration/test_graph.py::test_graph_custom_x", "test/integration/test_graph.py::test_graph_aggregate", "test/integration/test_graph.py::test_graph_individual", "test/integration/test_graph.py::test_graph_path", "test/integration/test_graph.py::test_graph_multiple", "test/integration/test_graph.py::test_graph_multiple_custom_x", "test/integration/test_graph.py::test_graph_multiple_path", "test/integration/test_graph.py::test_graph_output", "test/integration/test_graph.py::test_graph_output_granular", "test/integration/test_graph.py::test_graph_multiple_paths" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-08-16T20:01:44Z"
apache-2.0
tophat__syrupy-607
diff --git a/src/syrupy/location.py b/src/syrupy/location.py
index 31207e7..ca4aa26 100644
--- a/src/syrupy/location.py
+++ b/src/syrupy/location.py
@@ -114,4 +114,7 @@ class PyTestLocation:
         return self.__parse(self.snapshot_name) == self.__parse(snapshot_name)

     def matches_snapshot_location(self, snapshot_location: str) -> bool:
-        return self.filename in snapshot_location
+        loc = Path(snapshot_location)
+        # "test_file" should match "test_file.ext" or "test_file/whatever.ext", but not
+        # "test_file_suffix.ext"
+        return self.filename == loc.stem or self.filename == loc.parent.name
tophat/syrupy
a472b447c15e4200693a6f6011a6ac07d6f8caa5
diff --git a/tests/integration/test_snapshot_similar_names_default.py b/tests/integration/test_snapshot_similar_names_default.py index abc06a6..5ba1e18 100644 --- a/tests/integration/test_snapshot_similar_names_default.py +++ b/tests/integration/test_snapshot_similar_names_default.py @@ -16,29 +16,37 @@ def testcases(): assert snapshot == 'b' """ ), + "a_suffix": ( + """ + def test_a_suffix(snapshot): + assert snapshot == 'a_suffix' + """ + ), } @pytest.fixture def run_testcases(testdir, testcases): pyfile_content = "\n\n".join(testcases.values()) - testdir.makepyfile(test_1=pyfile_content, test_2=pyfile_content) + testdir.makepyfile( + test_1=pyfile_content, test_2=pyfile_content, test_1_with_suffix=pyfile_content + ) result = testdir.runpytest("-v", "--snapshot-update") - result.stdout.re_match_lines((r"4 snapshots generated\.")) + result.stdout.re_match_lines((r"9 snapshots generated\.")) return testdir, testcases def test_run_all(run_testcases): testdir, testcases = run_testcases result = testdir.runpytest("-v") - result.stdout.re_match_lines("4 snapshots passed") + result.stdout.re_match_lines("9 snapshots passed") assert result.ret == 0 def test_run_single_file(run_testcases): testdir, testcases = run_testcases result = testdir.runpytest("-v", "test_1.py") - result.stdout.re_match_lines("2 snapshots passed") + result.stdout.re_match_lines("3 snapshots passed") assert result.ret == 0 @@ -54,7 +62,7 @@ def test_run_all_but_one(run_testcases): result = testdir.runpytest( "-v", "--snapshot-details", "test_1.py", "test_2.py::test_a" ) - result.stdout.re_match_lines("3 snapshots passed") + result.stdout.re_match_lines("4 snapshots passed") assert result.ret == 0 diff --git a/tests/integration/test_snapshot_similar_names_file_extension.py b/tests/integration/test_snapshot_similar_names_file_extension.py index 19d1131..458d407 100644 --- a/tests/integration/test_snapshot_similar_names_file_extension.py +++ b/tests/integration/test_snapshot_similar_names_file_extension.py @@ -16,20 +16,28 @@ def testcases(): assert snapshot == b"b" """ ), + "a_suffix": ( + """ + def test_a_suffix(snapshot): + assert snapshot == b"a_suffix" + """ + ), } @pytest.fixture def run_testcases(testdir, testcases): pyfile_content = "\n\n".join(testcases.values()) - testdir.makepyfile(test_1=pyfile_content, test_2=pyfile_content) + testdir.makepyfile( + test_1=pyfile_content, test_2=pyfile_content, test_1_suffix=pyfile_content + ) result = testdir.runpytest( "-v", "--snapshot-update", "--snapshot-default-extension", "syrupy.extensions.single_file.SingleFileSnapshotExtension", ) - result.stdout.re_match_lines((r"4 snapshots generated\.")) + result.stdout.re_match_lines((r"9 snapshots generated\.")) return testdir, testcases @@ -40,7 +48,7 @@ def test_run_all(run_testcases): "--snapshot-default-extension", "syrupy.extensions.single_file.SingleFileSnapshotExtension", ) - result.stdout.re_match_lines("4 snapshots passed") + result.stdout.re_match_lines("9 snapshots passed") assert result.ret == 0 @@ -52,7 +60,7 @@ def test_run_single_file(run_testcases): "syrupy.extensions.single_file.SingleFileSnapshotExtension", "test_1.py", ) - result.stdout.re_match_lines("2 snapshots passed") + result.stdout.re_match_lines("3 snapshots passed") assert result.ret == 0 @@ -78,7 +86,7 @@ def test_run_all_but_one(run_testcases): "test_1.py", "test_2.py::test_a", ) - result.stdout.re_match_lines("3 snapshots passed") + result.stdout.re_match_lines("4 snapshots passed") assert result.ret == 0 diff --git a/tests/syrupy/test_location.py 
b/tests/syrupy/test_location.py index 7162559..6da7f9a 100644 --- a/tests/syrupy/test_location.py +++ b/tests/syrupy/test_location.py @@ -67,7 +67,15 @@ def test_location_properties( "/tests/module/test_file.py::TestClass::method_name", "method_name", ("test_file.snap", "__snapshots__/test_file", "test_file/1.snap"), - ("test.snap", "__others__/test/file.snap"), + ( + "test.snap", + "__others__/test/file.snap", + "test_file_extra.snap", + "__snapshots__/test_file_extra", + "test_file_extra/1.snap", + "test_file/extra/1.snap", + "__snapshots__/test_file/extra/even/more/1.snap", + ), ( "TestClass.method_name", "TestClass.method_name[1]", @@ -79,7 +87,15 @@ def test_location_properties( "/tests/module/test_file.py::TestClass::method_name[1]", "method_name", ("test_file.snap", "__snapshots__/test_file", "test_file/1.snap"), - ("test.snap", "__others__/test/file.snap"), + ( + "test.snap", + "__others__/test/file.snap", + "test_file_extra.snap", + "__snapshots__/test_file_extra", + "test_file_extra/1.snap", + "test_file/extra/1.snap", + "__snapshots__/test_file/extra/even/more/1.snap", + ), ( "TestClass.method_name", "TestClass.method_name[1]",
Unused snapshots when running on filename (test_a.py) that's the prefix of another (test_abc.py) **Describe the bug** I have a project that has filenames where the non-`.py` part happens to be a prefix of another (e.g. `test_a.py` vs. `test_abc.py`). When running `test_a.py` in isolation, syrupy picks up the snapshots from `test_abc.py` and either warns/errors about them or deletes them (depending on if `--snapshot-update`) is specified. (Thanks for syrupy!) **To reproduce** ```python # test_a.py def test_foo(snapshot): assert snapshot == "a" ``` ```python # test_abc.py def test_bar(snapshot): assert snapshot == "abc" ``` ``` # requirements.txt attrs==21.4.0 colored==1.4.3 iniconfig==1.1.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.0.8 pytest==7.1.1 syrupy==1.7.4 tomli==2.0.1 ``` With the above code, if I run `pytest test_a.py`, the snapshot from `test_abc.py` is flagged as unused: ``` ================================= test session starts ================================= platform darwin -- Python 3.9.10, pytest-7.1.1, pluggy-1.0.0 rootdir: .../syrupy-file-prefix plugins: syrupy-1.7.4 collected 1 item test_a.py . [100%] ------------------------------- snapshot report summary ------------------------------- 1 snapshot passed. 1 snapshot unused. Unused test_bar (__snapshots__/test_abc.ambr) Re-run pytest with --snapshot-update to delete unused snapshots. ================================== 1 passed in 0.01s ================================== ``` Fully packaged: ```shell # set up virtualenv/deps/files python --version # Python 3.9.10 python -m venv venv . venv/bin/activate pip install attrs==21.4.0 colored==1.4.3 iniconfig==1.1.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.0.8 pytest==7.1.1 syrupy==1.7.4 tomli==2.0.1 echo 'def test_foo(snapshot): assert snapshot == "a"' > test_a.py echo 'def test_bar(snapshot): assert snapshot == "abc"' > test_abc.py # create snapshots pytest --snapshot-update # 2 snapshots generated. pytest # 2 snapshots passed. # running test_abc.py: all okay pytest test_abc.py --snapshot-details # 1 snapshot passed. # running test_a.py: unused snapshots (BUG) pytest test_a.py --snapshot-details # Unused test_bar (__snapshots__/test_abc.ambr) pytest test_a.py --snapshot-details --snapshot-update # Deleted test_bar (__snapshots__/test_abc.ambr) ``` **Expected behavior** When running pytest against a single/subset of files, snapshots from other files shouldn't be considered unused. **Screenshots** <!-- If applicable, add screenshots to help explain your problem. --> **Environment (please complete the following information):** - OS: macOS 12.3 - Syrupy Version: 1.7.4 - Python Version: 3.9.10 **Additional context** N/A
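A note on the failure mode: the collision is plain string-prefix matching on snapshot locations, since `test_a` is a prefix of `test_abc`. The sketch below is illustrative only (the matcher names are hypothetical, not syrupy's real API) and contrasts the buggy prefix check with an exact comparison:

```python
from pathlib import Path

def matches_naive(snapshot_location: str, test_file_stem: str) -> bool:
    # Prefix comparison: "test_abc".startswith("test_a") is True, so the
    # snapshots of test_abc.py wrongly appear to belong to test_a.py.
    return Path(snapshot_location).stem.startswith(test_file_stem)

def matches_exact(snapshot_location: str, test_file_stem: str) -> bool:
    # Exact comparison on the stem (or on a directory component, for
    # single-file extensions) avoids the collision.
    path = Path(snapshot_location)
    return path.stem == test_file_stem or test_file_stem in path.parent.parts

print(matches_naive("__snapshots__/test_abc.ambr", "test_a"))  # True  (wrong)
print(matches_exact("__snapshots__/test_abc.ambr", "test_a"))  # False (correct)
```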
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/integration/test_snapshot_similar_names_default.py::test_run_single_file", "tests/integration/test_snapshot_similar_names_default.py::test_run_all_but_one", "tests/integration/test_snapshot_similar_names_default.py::test_run_both_files_by_node", "tests/integration/test_snapshot_similar_names_default.py::test_run_both_files_by_node_2", "tests/syrupy/test_location.py::test_location_matching[/tests/module/test_file.py::TestClass::method_name-method_name-expected_location_matches0-expected_location_misses0-expected_snapshot_matches0-expected_snapshot_misses0]", "tests/syrupy/test_location.py::test_location_matching[/tests/module/test_file.py::TestClass::method_name[1]-method_name-expected_location_matches1-expected_location_misses1-expected_snapshot_matches1-expected_snapshot_misses1]" ]
[ "tests/integration/test_snapshot_similar_names_default.py::test_run_all", "tests/integration/test_snapshot_similar_names_default.py::test_run_single_test_case_in_file", "tests/integration/test_snapshot_similar_names_file_extension.py::test_run_all", "tests/integration/test_snapshot_similar_names_file_extension.py::test_run_single_file", "tests/integration/test_snapshot_similar_names_file_extension.py::test_run_single_test_case_in_file", "tests/integration/test_snapshot_similar_names_file_extension.py::test_run_all_but_one", "tests/integration/test_snapshot_similar_names_file_extension.py::test_run_both_files_by_node", "tests/integration/test_snapshot_similar_names_file_extension.py::test_run_both_files_by_node_2", "tests/syrupy/test_location.py::test_location_properties[/tests/module/test_file.py::TestClass::method_name-method_name-test_file-TestClass-TestClass.method_name]", "tests/syrupy/test_location.py::test_location_properties[/tests/module/test_file.py::TestClass::method_name[1]-method_name-test_file-TestClass-TestClass.method_name[1]]", "tests/syrupy/test_location.py::test_location_properties[/tests/module/nest/test_file.py::TestClass::TestSubClass::method_name-method_name-test_file-TestClass.TestSubClass-TestClass.TestSubClass.method_name]" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2022-07-05T04:43:18Z"
apache-2.0
tophat__syrupy-621
diff --git a/src/syrupy/report.py b/src/syrupy/report.py index e642450..09b3991 100644 --- a/src/syrupy/report.py +++ b/src/syrupy/report.py @@ -299,7 +299,14 @@ class SnapshotReport: for snapshot_fossil in self.unused: filepath = snapshot_fossil.location snapshots = (snapshot.name for snapshot in snapshot_fossil) - path_to_file = str(Path(filepath).relative_to(self.base_dir)) + + try: + path_to_file = str(Path(filepath).relative_to(self.base_dir)) + except ValueError: + # this is just used for display, so better to fallback to + # something vaguely reasonable (the full path) than give up + path_to_file = filepath + unused_snapshots = ", ".join(map(bold, sorted(snapshots))) yield warning_style(gettext(base_message)) + " {} ({})".format( unused_snapshots, path_to_file
tophat/syrupy
f49678136d89efd1e8b929487e0a360720f4fc6b
diff --git a/tests/integration/test_snapshot_outside_directory.py b/tests/integration/test_snapshot_outside_directory.py new file mode 100644 index 0000000..b241c6f --- /dev/null +++ b/tests/integration/test_snapshot_outside_directory.py @@ -0,0 +1,81 @@ +import pytest + + [email protected] +def testcases(testdir, tmp_path): + dirname = tmp_path.joinpath("__snapshots__") + testdir.makeconftest( + f""" + import pytest + + from syrupy.extensions.amber import AmberSnapshotExtension + + class CustomSnapshotExtension(AmberSnapshotExtension): + @property + def _dirname(self): + return {str(dirname)!r} + + @pytest.fixture + def snapshot(snapshot): + return snapshot.use_extension(CustomSnapshotExtension) + """ + ) + return { + "zero": ( + """ + def test_do_it(snapshot): + pass + """ + ), + "one": ( + """ + def test_do_it(snapshot): + assert snapshot == 'passed1' + """ + ), + "two": ( + """ + def test_do_it(snapshot): + assert snapshot == 'passed1' + assert snapshot == 'passed2' + """ + ), + } + + [email protected] +def generate_snapshots(testdir, testcases): + testdir.makepyfile(test_file=testcases["two"]) + result = testdir.runpytest("-v", "--snapshot-update") + return result, testdir, testcases + + +def test_generated_snapshots(generate_snapshots): + result = generate_snapshots[0] + result.stdout.re_match_lines((r"2 snapshots generated\.")) + assert "snapshots unused" not in result.stdout.str() + assert result.ret == 0 + + +def test_unmatched_snapshots(generate_snapshots): + _, testdir, testcases = generate_snapshots + testdir.makepyfile(test_file=testcases["one"]) + result = testdir.runpytest("-v") + result.stdout.re_match_lines((r"1 snapshot passed. 1 snapshot unused\.")) + assert result.ret == 1 + + +def test_updated_snapshots_partial_delete(generate_snapshots): + _, testdir, testcases = generate_snapshots + testdir.makepyfile(test_file=testcases["one"]) + result = testdir.runpytest("-v", "--snapshot-update") + result.stdout.re_match_lines(r"1 snapshot passed. 1 unused snapshot deleted\.") + assert result.ret == 0 + + +def test_updated_snapshots_full_delete(generate_snapshots): + _, testdir, testcases = generate_snapshots + testdir.makepyfile(test_file=testcases["zero"]) + result = testdir.runpytest("-v", "--snapshot-update") + result.stdout.re_match_lines(r"2 unused snapshots deleted\.") + assert result.ret == 0
Custom extension with _dirname outside the pytest session directory results in crash during (detailed) reporting **Describe the bug** Currently a custom extension can write snapshots to a new directory by overriding the `_dirname` property, however, this directory has to be a child of pytest's root directory, or else syrupy's reporting crashes with an error like: ``` ValueError: '/tmp/example/__snapshots__/test_file.ambr' is not in the subpath of '/.../path/to/tests' OR one path is relative and the other is absolute. ``` (This is a very niche bug, sorry. Doing this directory hacking is designed to allow building a work-around for https://github.com/pantsbuild/pants/issues/11622.) **To reproduce** ```python import pytest from syrupy.extensions.amber import AmberSnapshotExtension class CustomSnapshotExtension(AmberSnapshotExtension): @property def _dirname(self): return '/tmp/example/__snapshots__' @pytest.fixture def snapshot(snapshot): return snapshot.use_extension(CustomSnapshotExtension) def test_do_it(snapshot): assert "one" == snapshot assert "two" == snapshot ``` 1. run the tests above with `--snapshot-update` 2. comment out the `"two"` line 3. run the tests again with `--snapshot-update` or `--snapshot-details` Output: ``` $ pytest test_file.py --snapshot-details ======================================================================================================================================================= test session starts ======================================================================================================================================================== platform darwin -- Python 3.10.4, pytest-7.1.3, pluggy-1.0.0 rootdir: /Users/huon/projects/tophat/syrupy, configfile: pyproject.toml plugins: syrupy-1.7.3 collected 1 item test_file.py . [100%] ----------------------------------------------------------------------------------------------------------------------------------------------------- snapshot report summary ------------------------------------------------------------------------------------------------------------------------------------------------------ 1 snapshot passed. 1 snapshot unused. Traceback (most recent call last): File "/Users/huon/projects/tophat/syrupy/test/bin/pytest", line 8, in <module> sys.exit(console_main()) ... File "/Users/huon/projects/tophat/syrupy/src/syrupy/report.py", line 304, in lines path_to_file = str(Path(filepath).relative_to(self.base_dir)) File "/Users/huon/.pyenv/versions/3.10.4/lib/python3.10/pathlib.py", line 816, in relative_to raise ValueError("{!r} is not in the subpath of {!r}" ``` **Expected behavior** Syrupy should behave as normal even with a 'weird' snapshot directory like this. **Screenshots** <!-- If applicable, add screenshots to help explain your problem. --> **Environment (please complete the following information):** - OS: macOS - Syrupy Version: 3.0.0 - Python Version: 3.10.4 **Additional context** Thanks for Syrupy!
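The traceback bottoms out in `pathlib`: `Path.relative_to()` raises `ValueError` whenever the target path is not inside the base path, which is exactly what a snapshot directory outside the pytest root produces. The patch in this record guards the call the same way; a standalone sketch of the behaviour:

```python
from pathlib import Path

base_dir = Path("/home/user/project/tests")
snapshot_location = Path("/tmp/example/__snapshots__/test_file.ambr")

try:
    path_to_file = str(snapshot_location.relative_to(base_dir))
except ValueError:
    # relative_to() refuses paths outside base_dir; for a report line the
    # absolute path is still a reasonable fallback to display.
    path_to_file = str(snapshot_location)

print(path_to_file)  # /tmp/example/__snapshots__/test_file.ambr
```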
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/integration/test_snapshot_outside_directory.py::test_updated_snapshots_partial_delete", "tests/integration/test_snapshot_outside_directory.py::test_updated_snapshots_full_delete" ]
[ "tests/integration/test_snapshot_outside_directory.py::test_generated_snapshots", "tests/integration/test_snapshot_outside_directory.py::test_unmatched_snapshots" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
"2022-09-20T05:47:02Z"
apache-2.0
tophat__syrupy-634
diff --git a/src/syrupy/terminal.py b/src/syrupy/terminal.py index dec1532..f59d696 100644 --- a/src/syrupy/terminal.py +++ b/src/syrupy/terminal.py @@ -13,6 +13,24 @@ def _is_color_disabled() -> bool: return any(map(get_env_value, DISABLE_COLOR_ENV_VARS)) +def _attr(color: Any) -> str: + if _is_color_disabled(): + return "" + return colored.attr(color) + + +def _fg(color: Any) -> str: + if _is_color_disabled(): + return "" + return colored.fg(color) + + +def _bg(color: Any) -> str: + if _is_color_disabled(): + return "" + return colored.bg(color) + + def _stylize(text: Union[str, int], *args: Any) -> str: if _is_color_disabled(): return str(text) @@ -20,23 +38,23 @@ def _stylize(text: Union[str, int], *args: Any) -> str: def reset(text: Union[str, int]) -> str: - return _stylize(text, colored.attr("reset")) + return _stylize(text, _attr("reset")) def red(text: Union[str, int]) -> str: - return _stylize(text, colored.fg("red")) + return _stylize(text, _fg("red")) def yellow(text: Union[str, int]) -> str: - return _stylize(text, colored.fg("yellow")) + return _stylize(text, _fg("yellow")) def green(text: Union[str, int]) -> str: - return _stylize(text, colored.fg("green")) + return _stylize(text, _fg("green")) def bold(text: Union[str, int]) -> str: - return _stylize(text, colored.attr("bold")) + return _stylize(text, _attr("bold")) def error_style(text: Union[str, int]) -> str: @@ -52,20 +70,20 @@ def success_style(text: Union[str, int]) -> str: def snapshot_style(text: Union[str, int]) -> str: - return _stylize(text, colored.bg(225) + colored.fg(90)) + return _stylize(text, _bg(225) + _fg(90)) def snapshot_diff_style(text: Union[str, int]) -> str: - return _stylize(text, colored.bg(90) + colored.fg(225)) + return _stylize(text, _bg(90) + _fg(225)) def received_style(text: Union[str, int]) -> str: - return _stylize(text, colored.bg(195) + colored.fg(23)) + return _stylize(text, _bg(195) + _fg(23)) def received_diff_style(text: Union[str, int]) -> str: - return _stylize(text, colored.bg(23) + colored.fg(195)) + return _stylize(text, _bg(23) + _fg(195)) def context_style(text: Union[str, int]) -> str: - return _stylize(text, colored.attr("dim")) + return _stylize(text, _attr("dim"))
tophat/syrupy
dccc789522b96d6dc0a1608828e71b96fee8c215
diff --git a/tests/syrupy/test_terminal.py b/tests/syrupy/test_terminal.py new file mode 100644 index 0000000..cf9df9f --- /dev/null +++ b/tests/syrupy/test_terminal.py @@ -0,0 +1,58 @@ +from unittest.mock import ( + NonCallableMock, + patch, +) + +import pytest + +from syrupy.constants import DISABLE_COLOR_ENV_VAR +from syrupy.terminal import ( + bold, + context_style, + error_style, + green, + received_diff_style, + received_style, + red, + reset, + snapshot_diff_style, + snapshot_style, + success_style, + warning_style, + yellow, +) + + +def test_colors_off_does_not_call_colored(): + """ + Test that disabling colors prevents instantiating colored object. + Enables workarounds for when instantiating the colored object causes crashes, + see issue #633 + """ + + with patch( + "syrupy.terminal.colored.colored.__init__", new_callable=NonCallableMock + ): + with patch.dict("os.environ", {DISABLE_COLOR_ENV_VAR: "true"}): + for method in ( + reset, + red, + yellow, + green, + bold, + error_style, + warning_style, + success_style, + snapshot_style, + snapshot_diff_style, + received_style, + received_diff_style, + context_style, + ): + _ = method("foo") + + # Prevent test from accidentally passing by patching wrong object + with pytest.raises(TypeError) as excinfo: + _ = red("foo") + + assert "NonCallableMock" in str(excinfo.value)
Report summary crashes on Windows 10 **Describe the bug** When printing a summary of snapshots, syrupy formats the text using `colored.fg`. This triggers a crash, as `colored.fg` uses incorrect types when interfacing with the kernel32 dll. An [issue](https://gitlab.com/dslackw/colored/-/issues/25) and corresponding [merge request](https://gitlab.com/dslackw/colored/-/merge_requests/19) has been created on colored's repository. Unfortunately, this can not be bypassed via `--snapshot-no-colors` or other environment variables, as the call to `colored.fg` occurs _before_ we check for those. **To reproduce** Run any snapshot test on (the above version of) Windows. Result: ``` -------------- snapshot report summary --------------- Traceback (most recent call last): File "...\Python310\lib\runpy.py", line 196, in _run_module_as_main return _run_code(code, main_globals, None, File "...\Python310\lib\runpy.py", line 86, in _run_code exec(code, run_globals) File "...\Scripts\pytest.exe\__main__.py", line 7, in <module> File "...\lib\site-packages\_pytest\config\__init__.py", line 185, in console_main code = main() File "...\lib\site-packages\_pytest\config\__init__.py", line 162, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "...\lib\site-packages\pluggy\_hooks.py", line 265, in __call__ return self._hookexec(self.name, self.get_hookimpls(), kwargs, firstresult) File "...\lib\site-packages\pluggy\_manager.py", line 80, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "...\lib\site-packages\pluggy\_callers.py", line 60, in _multicall return outcome.get_result() File "...\lib\site-packages\pluggy\_result.py", line 60, in get_result raise ex[1].with_traceback(ex[2]) File "...\lib\site-packages\pluggy\_callers.py", line 39, in _multicall res = hook_impl.function(*args) File "...\lib\site-packages\_pytest\main.py", line 316, in pytest_cmdline_main return wrap_session(config, _main) File "...\lib\site-packages\_pytest\main.py", line 304, in wrap_session config.hook.pytest_sessionfinish( File "...\lib\site-packages\pluggy\_hooks.py", line 265, in __call__ return self._hookexec(self.name, self.get_hookimpls(), kwargs, firstresult) File "...\lib\site-packages\pluggy\_manager.py", line 80, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "...\lib\site-packages\pluggy\_callers.py", line 55, in _multicall gen.send(outcome) File "...\lib\site-packages\_pytest\terminal.py", line 813, in pytest_sessionfinish self.config.hook.pytest_terminal_summary( File "...\lib\site-packages\pluggy\_hooks.py", line 265, in __call__ return self._hookexec(self.name, self.get_hookimpls(), kwargs, firstresult) File "...\lib\site-packages\pluggy\_manager.py", line 80, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "...\lib\site-packages\pluggy\_callers.py", line 60, in _multicall return outcome.get_result() File "...\lib\site-packages\pluggy\_result.py", line 60, in get_result raise ex[1].with_traceback(ex[2]) File "...\lib\site-packages\pluggy\_callers.py", line 39, in _multicall res = hook_impl.function(*args) File "...\lib\site-packages\syrupy\__init__.py", line 170, in pytest_terminal_summary for line in terminalreporter.config._syrupy.report.lines: File "...\lib\site-packages\syrupy\report.py", line 277, in lines ).format(green(self.num_updated)) File "...\lib\site-packages\syrupy\terminal.py", line 34, in green return _stylize(text, colored.fg("green")) File 
"...\lib\site-packages\colored\colored.py", line 431, in fg return colored(color).foreground() File "...\lib\site-packages\colored\colored.py", line 23, in __init__ self.enable_windows_terminal_mode() File "...\lib\site-packages\colored\colored.py", line 374, in enable_windows_terminal_mode hStdout = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE) ctypes.ArgumentError: argument 1: <class 'TypeError'>: wrong type ``` **Expected behavior** No crash **Environment (please complete the following information):** - OS: Windows 10 Enterprise 21H2 19044.2130 - Syrupy Version: 3.0.4 - Python Version: Python 3.10.4 (tags/v3.10.4:9d38120, Mar 23 2022, 23:13:41) [MSC v.1929 64 bit (AMD64)] on win32 - (Colored Version: 1.4.3) **Additional context** It would be helpful if specifying `NO_COLOR` et al. would pre-empt calling `colored.fg` - that way, we would be able to bypass the issue without any nasty hacks. In lieu of that, the following nasty hack acts as a workaround: In the test module (or possibly conftest.py, haven't checked), add: ``` import colored colored.colored.enable_windows_terminal_mode = lambda self: None ``` Then run the tests as normal ( possibly with `--snapshot-no-colors`, although I didn't notice any difference).
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/syrupy/test_terminal.py::test_colors_off_does_not_call_colored" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
"2022-11-08T08:46:12Z"
apache-2.0
tophat__syrupy-672
diff --git a/src/syrupy/extensions/json/__init__.py b/src/syrupy/extensions/json/__init__.py index 0b9a954..d35a1ef 100644 --- a/src/syrupy/extensions/json/__init__.py +++ b/src/syrupy/extensions/json/__init__.py @@ -64,7 +64,7 @@ class JSONSnapshotExtension(SingleFileSnapshotExtension): elif matcher: data = matcher(data=data, path=path) - if isinstance(data, (int, float, str)): + if isinstance(data, (int, float, str)) or data is None: return data filtered_dct: Dict[Any, Any]
tophat/syrupy
f9c6abaa30b5ffb3e9e5beaa9f74d73539ab6c1f
diff --git a/tests/syrupy/extensions/json/__snapshots__/test_json_filters/test_exclude_in_json_with_empty_values.json b/tests/syrupy/extensions/json/__snapshots__/test_json_filters/test_exclude_in_json_with_empty_values.json index 2e8f310..71d5f1e 100644 --- a/tests/syrupy/extensions/json/__snapshots__/test_json_filters/test_exclude_in_json_with_empty_values.json +++ b/tests/syrupy/extensions/json/__snapshots__/test_json_filters/test_exclude_in_json_with_empty_values.json @@ -1,5 +1,5 @@ { "empty_dict": {}, "empty_list": [], - "none": "None" + "none": null } diff --git a/tests/syrupy/extensions/json/__snapshots__/test_json_filters/test_serializer[content2].json b/tests/syrupy/extensions/json/__snapshots__/test_json_filters/test_serializer[content2].json index f518b00..990521b 100644 --- a/tests/syrupy/extensions/json/__snapshots__/test_json_filters/test_serializer[content2].json +++ b/tests/syrupy/extensions/json/__snapshots__/test_json_filters/test_serializer[content2].json @@ -7,6 +7,6 @@ "datetime": "2021-01-31T23:59:00.000000", "float": 4.2, "int": -1, - "null": "None", + "null": null, "str": "foo" } diff --git a/tests/syrupy/extensions/json/__snapshots__/test_json_serializer/test_dict[actual2].json b/tests/syrupy/extensions/json/__snapshots__/test_json_serializer/test_dict[actual2].json index 658b260..48e8cd1 100644 --- a/tests/syrupy/extensions/json/__snapshots__/test_json_serializer/test_dict[actual2].json +++ b/tests/syrupy/extensions/json/__snapshots__/test_json_serializer/test_dict[actual2].json @@ -1,4 +1,5 @@ { "a": "Some ttext.", + "key": null, "multi\nline\nkey": "Some morre text." } diff --git a/tests/syrupy/extensions/json/__snapshots__/test_json_serializer/test_empty_snapshot.json b/tests/syrupy/extensions/json/__snapshots__/test_json_serializer/test_empty_snapshot.json index 68cab94..19765bd 100644 --- a/tests/syrupy/extensions/json/__snapshots__/test_json_serializer/test_empty_snapshot.json +++ b/tests/syrupy/extensions/json/__snapshots__/test_json_serializer/test_empty_snapshot.json @@ -1,1 +1,1 @@ -"None" +null diff --git a/tests/syrupy/extensions/json/test_json_serializer.py b/tests/syrupy/extensions/json/test_json_serializer.py index 7a6a2c1..9c1e359 100644 --- a/tests/syrupy/extensions/json/test_json_serializer.py +++ b/tests/syrupy/extensions/json/test_json_serializer.py @@ -124,6 +124,7 @@ def test_set(snapshot_json, actual): "multi\nline\nkey": "Some morre text.", frozenset({"1", "2"}): ["1", 2], ExampleTuple(a=1, b=2, c=3, d=4): {"e": False}, + "key": None, }, {}, {"key": ["line1\nline2"]},
JSONSnapshotExtension None is serialized as "None" instead of null Is it intended behaviour that `None` is not translated to `null` with the JSONSnapshotExtension? ```python @pytest.fixture def snapshot_json(snapshot): return snapshot.use_extension(JSONSnapshotExtension) def test_output(snapshot_json): assert {"x": None} == snapshot_json() ``` Actual Output: ```json { "x": "None" } ``` Expected Output: ```json { "x": null } ``` Digging into the code, it looks like there's no handling of None in _filter, and it eventually reaches the `return repr(None)` line. I can wrap the existing class with the following code: ```python import pytest from syrupy.extensions.json import JSONSnapshotExtension @pytest.fixture def snapshot_json(snapshot): class CustomJSONExtension(JSONSnapshotExtension): @classmethod def _filter( cls, data, **kwargs ): if data is None: return data else: return super()._filter(data, **kwargs) return snapshot.use_extension(CustomJSONExtension) ``` I was wondering if there's a different way to get this behaviour?
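The one-line fix in this record's patch (`or data is None`) lets `None` pass through the filter untouched, so the JSON encoder can emit a native `null` instead of the stringified `repr`. The difference in a nutshell:

```python
import json

print(json.dumps({"x": None}))        # {"x": null}    <- fixed behaviour
print(json.dumps({"x": repr(None)}))  # {"x": "None"}  <- the old fallthrough
```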
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/syrupy/extensions/json/test_json_serializer.py::test_empty_snapshot", "tests/syrupy/extensions/json/test_json_serializer.py::test_dict[actual2]" ]
[ "tests/syrupy/extensions/json/test_json_serializer.py::test_non_snapshots", "tests/syrupy/extensions/json/test_json_serializer.py::test_reflection", "tests/syrupy/extensions/json/test_json_serializer.py::test_snapshot_markers", "tests/syrupy/extensions/json/test_json_serializer.py::test_newline_control_characters", "tests/syrupy/extensions/json/test_json_serializer.py::test_multiline_string_in_dict", "tests/syrupy/extensions/json/test_json_serializer.py::test_deeply_nested_multiline_string_in_dict", "tests/syrupy/extensions/json/test_json_serializer.py::test_bool[False]", "tests/syrupy/extensions/json/test_json_serializer.py::test_bool[True]", "tests/syrupy/extensions/json/test_json_serializer.py::test_string[0]", "tests/syrupy/extensions/json/test_json_serializer.py::test_string[1]", "tests/syrupy/extensions/json/test_json_serializer.py::test_string[2]", "tests/syrupy/extensions/json/test_json_serializer.py::test_string[3]", "tests/syrupy/extensions/json/test_json_serializer.py::test_string[4]", "tests/syrupy/extensions/json/test_json_serializer.py::test_string[5]", "tests/syrupy/extensions/json/test_json_serializer.py::test_string[6]", "tests/syrupy/extensions/json/test_json_serializer.py::test_string[7]", "tests/syrupy/extensions/json/test_json_serializer.py::test_string[8]", "tests/syrupy/extensions/json/test_json_serializer.py::test_string[9]", "tests/syrupy/extensions/json/test_json_serializer.py::test_string[10]", "tests/syrupy/extensions/json/test_json_serializer.py::test_multiple_snapshots", "tests/syrupy/extensions/json/test_json_serializer.py::test_tuple", "tests/syrupy/extensions/json/test_json_serializer.py::test_set[actual0]", "tests/syrupy/extensions/json/test_json_serializer.py::test_set[actual1]", "tests/syrupy/extensions/json/test_json_serializer.py::test_set[actual2]", "tests/syrupy/extensions/json/test_json_serializer.py::test_set[actual3]", "tests/syrupy/extensions/json/test_json_serializer.py::test_set[actual4]", "tests/syrupy/extensions/json/test_json_serializer.py::test_dict[actual0]", "tests/syrupy/extensions/json/test_json_serializer.py::test_dict[actual1]", "tests/syrupy/extensions/json/test_json_serializer.py::test_dict[actual3]", "tests/syrupy/extensions/json/test_json_serializer.py::test_dict[actual4]", "tests/syrupy/extensions/json/test_json_serializer.py::test_dict[actual5]", "tests/syrupy/extensions/json/test_json_serializer.py::test_dict[actual6]", "tests/syrupy/extensions/json/test_json_serializer.py::test_numbers", "tests/syrupy/extensions/json/test_json_serializer.py::test_list[actual0]", "tests/syrupy/extensions/json/test_json_serializer.py::test_list[actual1]", "tests/syrupy/extensions/json/test_json_serializer.py::test_list[actual2]", "tests/syrupy/extensions/json/test_json_serializer.py::test_list[actual3]", "tests/syrupy/extensions/json/test_json_serializer.py::test_cycle[cyclic0]", "tests/syrupy/extensions/json/test_json_serializer.py::test_cycle[cyclic1]", "tests/syrupy/extensions/json/test_json_serializer.py::test_custom_object_repr", "tests/syrupy/extensions/json/test_json_serializer.py::TestClass::test_class_method_name", "tests/syrupy/extensions/json/test_json_serializer.py::TestClass::test_class_method_parametrized[a]", "tests/syrupy/extensions/json/test_json_serializer.py::TestClass::test_class_method_parametrized[b]", "tests/syrupy/extensions/json/test_json_serializer.py::TestClass::test_class_method_parametrized[c]", "tests/syrupy/extensions/json/test_json_serializer.py::TestClass::TestNestedClass::test_nested_class_method[x]", 
"tests/syrupy/extensions/json/test_json_serializer.py::TestClass::TestNestedClass::test_nested_class_method[y]", "tests/syrupy/extensions/json/test_json_serializer.py::TestClass::TestNestedClass::test_nested_class_method[z]", "tests/syrupy/extensions/json/test_json_serializer.py::TestSubClass::test_class_method_name", "tests/syrupy/extensions/json/test_json_serializer.py::TestSubClass::test_class_method_parametrized[a]", "tests/syrupy/extensions/json/test_json_serializer.py::TestSubClass::test_class_method_parametrized[b]", "tests/syrupy/extensions/json/test_json_serializer.py::TestSubClass::test_class_method_parametrized[c]", "tests/syrupy/extensions/json/test_json_serializer.py::TestSubClass::TestNestedClass::test_nested_class_method[x]", "tests/syrupy/extensions/json/test_json_serializer.py::TestSubClass::TestNestedClass::test_nested_class_method[y]", "tests/syrupy/extensions/json/test_json_serializer.py::TestSubClass::TestNestedClass::test_nested_class_method[z]", "tests/syrupy/extensions/json/test_json_serializer.py::test_parameter_with_dot[value.with.dot]", "tests/syrupy/extensions/json/test_json_serializer.py::test_doubly_parametrized[bar-foo]" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2022-12-30T18:35:59Z"
apache-2.0
tophat__syrupy-710
diff --git a/src/syrupy/extensions/single_file.py b/src/syrupy/extensions/single_file.py index af53ea4..8e1a7c5 100644 --- a/src/syrupy/extensions/single_file.py +++ b/src/syrupy/extensions/single_file.py @@ -80,8 +80,11 @@ class SingleFileSnapshotExtension(AbstractSyrupyExtension): def _read_snapshot_collection( self, *, snapshot_location: str ) -> "SnapshotCollection": + file_ext_len = len(self._file_extension) + 1 if self._file_extension else 0 + filename_wo_ext = snapshot_location[:-file_ext_len] + snapshot_collection = SnapshotCollection(location=snapshot_location) - snapshot_collection.add(Snapshot(name=Path(snapshot_location).stem)) + snapshot_collection.add(Snapshot(name=Path(filename_wo_ext).stem)) return snapshot_collection def _read_snapshot_data_from_location(
tophat/syrupy
b831ee21b54f0b0dd287a7a0c6e138d6b553f26b
diff --git a/tests/integration/test_single_file_multiple_extensions.py b/tests/integration/test_single_file_multiple_extensions.py new file mode 100644 index 0000000..b93f287 --- /dev/null +++ b/tests/integration/test_single_file_multiple_extensions.py @@ -0,0 +1,40 @@ +from pathlib import Path + + +def test_multiple_file_extensions(testdir): + file_extension = "ext2.ext1" + + testcase = f""" + import pytest + from syrupy.extensions.single_file import SingleFileSnapshotExtension + + class DotInFileExtension(SingleFileSnapshotExtension): + _file_extension = "{file_extension}" + + @pytest.fixture + def snapshot(snapshot): + return snapshot.use_extension(DotInFileExtension) + + def test_dot_in_filename(snapshot): + assert b"expected_data" == snapshot + """ + + test_file: Path = testdir.makepyfile(test_file=testcase) + + result = testdir.runpytest("-v", "--snapshot-update") + result.stdout.re_match_lines((r"1 snapshot generated\.")) + assert "snapshots unused" not in result.stdout.str() + assert result.ret == 0 + + snapshot_file = ( + Path(test_file).parent + / "__snapshots__" + / "test_file" + / f"test_dot_in_filename.{file_extension}" + ) + assert snapshot_file.exists() + + result = testdir.runpytest("-v") + result.stdout.re_match_lines((r"1 snapshot passed\.")) + assert "snapshots unused" not in result.stdout.str() + assert result.ret == 0
having a dot in the file extension causes unexpected behavior **Describe the bug** Hello, Thanks for syrupy! We ran into a small issue when customizing the file extension. As mentioned in the title, I ran into an issue when trying to use an file extension like `png.zip`. I'm thinking, it's related to having and extra `.` in the file extension. ```console $ pytest tests/syrupy/extensions/image/test_dot_in_extension.py --snapshot-update =============================================================== test session starts ================================================================ platform darwin -- Python 3.10.9, pytest-7.2.1, pluggy-1.0.0 benchmark: 4.0.0 (defaults: timer=time.perf_counter disable_gc=False min_rounds=5 min_time=0.000005 max_time=1.0 calibration_precision=10 warmup=False warmup_iterations=100000) rootdir: /Users/tolga.eren/work/syrupy, configfile: pyproject.toml plugins: syrupy-4.0.0, xdist-3.1.0, benchmark-4.0.0 collected 1 item tests/syrupy/extensions/image/test_dot_in_extension.py . [100%] ------------------------------------------------------------- snapshot report summary -------------------------------------------------------------- 1 snapshot passed. 1 unused snapshot deleted. Deleted test_dot_in_file_extension.png (tests/syrupy/extensions/image/__snapshots__/test_dot_in_extension/test_dot_in_file_extension.png.zip) ================================================================ 1 passed in 0.01s ================================================================= ``` The unexpected part is here: 1. Reporting says `1 snapshot passed. 1 unused snapshot deleted.`: There wasn't an unused snapshot and it wasn't deleted 2. If I run the `--snapshot--update` again, now it deletes the snapshot file, which it shoudn't. **To reproduce** I've modified one of the existing tests to reproduce: ```python # tests/syrupy/extensions/image/test_dot_in_extension.py import base64 import pytest from syrupy.extensions.single_file import SingleFileSnapshotExtension class DotInFileExtension(SingleFileSnapshotExtension): _file_extension = "png.zip" actual_png = base64.b64decode( b"iVBORw0KGgoAAAANSUhEUgAAADIAAAAyBAMAAADsEZWCAAAAG1BMVEXMzMy" b"Wlpaqqqq3t7exsbGcnJy+vr6jo6PFxcUFpPI/AAAACXBIWXMAAA7EAAAOxA" b"GVKw4bAAAAQUlEQVQ4jWNgGAWjgP6ASdncAEaiAhaGiACmFhCJLsMaIiDAE" b"QEi0WXYEiMCOCJAJIY9KuYGTC0gknpuHwXDGwAA5fsIZw0iYWYAAAAASUVO" b"RK5CYII=" ) @pytest.fixture def snapshot_dot_in_file_extension(snapshot): return snapshot.use_extension(DotInFileExtension) def test_dot_in_file_extension(snapshot_dot_in_file_extension): assert actual_png == snapshot_dot_in_file_extension ``` Run `pytest tests/syrupy/extensions/image/test_dot_in_extension.py --snapshot-update` twice to observe the unexpected behavior. **Expected behavior** 1. Correct reporting as in : `1 snapshot generated.` 2. and not deleting the generated snapshot in the second update run. **Environment (please complete the following information):** I've tested in the main branch
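The root cause is that `Path.stem` strips only the final suffix, so a two-part extension such as `png.zip` leaves a spurious `.png` in the recovered snapshot name, which then never matches the test. The patch removes the full declared extension by length before taking the name:

```python
from pathlib import Path

location = "__snapshots__/test_file/test_dot_in_file_extension.png.zip"
file_extension = "png.zip"

# Path.stem drops only ".zip", leaving ".png" glued to the snapshot name:
print(Path(location).stem)  # test_dot_in_file_extension.png

# Stripping the whole declared extension first (len + 1 for the dot),
# as the fix does, recovers the real test name:
filename_wo_ext = location[: -(len(file_extension) + 1)]
print(Path(filename_wo_ext).stem)  # test_dot_in_file_extension
```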
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/integration/test_single_file_multiple_extensions.py::test_multiple_file_extensions" ]
[]
{ "failed_lite_validators": [ "has_media" ], "has_test_patch": true, "is_lite": false }
"2023-02-16T11:14:42Z"
apache-2.0
tophat__syrupy-734
diff --git a/poetry.lock b/poetry.lock index 069852d..79bedb2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1173,14 +1173,14 @@ jeepney = ">=0.6" [[package]] name = "semver" -version = "2.13.0" -description = "Python helper for Semantic Versioning (http://semver.org/)" +version = "3.0.0" +description = "Python helper for Semantic Versioning (https://semver.org)" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.7" files = [ - {file = "semver-2.13.0-py2.py3-none-any.whl", hash = "sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4"}, - {file = "semver-2.13.0.tar.gz", hash = "sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f"}, + {file = "semver-3.0.0-py3-none-any.whl", hash = "sha256:ab4f69fb1d1ecfb5d81f96411403d7a611fa788c45d252cf5b408025df3ab6ce"}, + {file = "semver-3.0.0.tar.gz", hash = "sha256:94df43924c4521ec7d307fc86da1531db6c2c33d9d5cdc3e64cca0eb68569269"}, ] [[package]] @@ -1329,4 +1329,4 @@ testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more [metadata] lock-version = "2.0" python-versions = '>=3.8.1,<4' -content-hash = "20ccfa73b2257c72d63a634b9381c9210f0c961511558b679caa42e3bd7558ee" +content-hash = "b1de5497b88df972689ae2b2a96f6cfc61d5270b5839b66a98cc12a261d9473d" diff --git a/pyproject.toml b/pyproject.toml index 1cc9512..8c9ded1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,7 +47,7 @@ flake8-bugbear = '^23.2.13' flake8-builtins = '^2.1.0' flake8-comprehensions = '^3.10.1' twine = '^4.0.2' -semver = '^2.13.0' +semver = '^3.0.0' setuptools-scm = '^7.1.0' debugpy = '^1.6.6' diff --git a/src/syrupy/__init__.py b/src/syrupy/__init__.py index 5f1a646..560ddb0 100644 --- a/src/syrupy/__init__.py +++ b/src/syrupy/__init__.py @@ -41,11 +41,6 @@ def __import_extension(value: Optional[str]) -> Any: raise argparse.ArgumentTypeError(e) -def __default_extension_option(value: Optional[str]) -> Any: - __import_extension(value) - return value - - def pytest_addoption(parser: Any) -> None: """ Exposes snapshot plugin configuration to pytest. @@ -78,7 +73,6 @@ def pytest_addoption(parser: Any) -> None: # all pytest options to be serializable. 
group.addoption( "--snapshot-default-extension", - type=__default_extension_option, default=None, dest="default_extension", help="Specify the default snapshot extension", diff --git a/src/syrupy/utils.py b/src/syrupy/utils.py index c086173..4dd08cd 100644 --- a/src/syrupy/utils.py +++ b/src/syrupy/utils.py @@ -48,11 +48,14 @@ def import_module_member(path: str) -> Any: ) ) try: - return getattr(import_module(module_name), module_member_name) + module = import_module(module_name) except ModuleNotFoundError: raise FailedToLoadModuleMember( gettext("Module '{}' does not exist.").format(module_name) ) + + try: + return getattr(module, module_member_name) except AttributeError: raise FailedToLoadModuleMember( gettext("Member '{}' not found in module '{}'.").format( diff --git a/tasks/build.py b/tasks/build.py index ef05530..e66f769 100644 --- a/tasks/build.py +++ b/tasks/build.py @@ -58,7 +58,7 @@ def release(ctx, dry_run=True, version=None): """ Build and publish package to pypi index based on scm version """ - from semver import parse_version_info + from semver.version import Version if not dry_run and not os.environ.get("CI"): print("This is a CI only command") @@ -72,7 +72,8 @@ def release(ctx, dry_run=True, version=None): exit(1) try: - should_publish_to_pypi = not dry_run and parse_version_info(version) + Version.parse(version) + should_publish_to_pypi = not dry_run except ValueError: should_publish_to_pypi = False
tophat/syrupy
028cb8f0c100f6be118d5aacf80974e7503980e2
diff --git a/tests/integration/test_snapshot_option_extension.py b/tests/integration/test_snapshot_option_extension.py index 546da3f..884f92a 100644 --- a/tests/integration/test_snapshot_option_extension.py +++ b/tests/integration/test_snapshot_option_extension.py @@ -37,12 +37,7 @@ def test_snapshot_default_extension_option_failure(testfile): "--snapshot-default-extension", "syrupy.extensions.amber.DoesNotExistExtension", ) - result.stderr.re_match_lines( - ( - r".*error: argument --snapshot-default-extension" - r": Member 'DoesNotExistExtension' not found.*", - ) - ) + result.stdout.re_match_lines((r".*: Member 'DoesNotExistExtension' not found.*",)) assert not Path( testfile.tmpdir, "__snapshots__", "test_file", "test_default.raw" ).exists() diff --git a/tests/integration/test_snapshot_option_extension_pythonpath.py b/tests/integration/test_snapshot_option_extension_pythonpath.py new file mode 100644 index 0000000..3570154 --- /dev/null +++ b/tests/integration/test_snapshot_option_extension_pythonpath.py @@ -0,0 +1,101 @@ +import textwrap +from pathlib import Path + +import pytest + +import syrupy + +SUBDIR = "subdir_not_on_default_path" + + [email protected](autouse=True) +def cache_clear(): + syrupy.__import_extension.cache_clear() + + [email protected] +def testfile(pytester): + subdir = pytester.mkpydir(SUBDIR) + + Path( + subdir, + "extension_file.py", + ).write_text( + data=textwrap.dedent( + """ + from syrupy.extensions.single_file import SingleFileSnapshotExtension + class MySingleFileExtension(SingleFileSnapshotExtension): + pass + """ + ), + encoding="utf-8", + ) + + pytester.makepyfile( + test_file=( + """ + def test_default(snapshot): + assert b"default extension serializer" == snapshot + """ + ) + ) + + return pytester + + +def test_snapshot_default_extension_option_success(testfile): + testfile.makeini( + f""" + [pytest] + pythonpath = + {Path(testfile.path, SUBDIR).as_posix()} + """ + ) + + result = testfile.runpytest( + "-v", + "--snapshot-update", + "--snapshot-default-extension", + "extension_file.MySingleFileExtension", + ) + result.stdout.re_match_lines((r"1 snapshot generated\.")) + assert Path( + testfile.path, "__snapshots__", "test_file", "test_default.raw" + ).exists() + assert not result.ret + + +def test_snapshot_default_extension_option_module_not_found(testfile): + result = testfile.runpytest( + "-v", + "--snapshot-update", + "--snapshot-default-extension", + "extension_file.MySingleFileExtension", + ) + result.stdout.re_match_lines((r".*: Module 'extension_file' does not exist.*",)) + assert not Path( + testfile.path, "__snapshots__", "test_file", "test_default.raw" + ).exists() + assert result.ret + + +def test_snapshot_default_extension_option_failure(testfile): + testfile.makeini( + f""" + [pytest] + pythonpath = + {Path(testfile.path, SUBDIR).as_posix()} + """ + ) + + result = testfile.runpytest( + "-v", + "--snapshot-update", + "--snapshot-default-extension", + "extension_file.DoesNotExistExtension", + ) + result.stdout.re_match_lines((r".*: Member 'DoesNotExistExtension' not found.*",)) + assert not Path( + testfile.path, "__snapshots__", "test_file", "test_default.raw" + ).exists() + assert result.ret
`--snapshot-default-extension` doesn't support pytest 7 `pythonpath` **Describe the bug** `--snapshot-default-extension` doesn't support pytest 7's `pythonpath` configuration option, for pytest-only additions to the Python path. For my project, I'm using `--snapshot-default-extension` so the right extension and serializer are in place, before Syrupy begins its reporting. My Syrupy extensions are for tests only, so they live outside of my src/ folder. Only the src/ folder of my project seems to be on the default Python path. So when running tests, I need to tell Syrupy about my extensions, somehow. I'd love to use the vanilla `pytest` command directly, configured in pyproject.toml, without having to pass a custom `PYTHONPATH` to `pytest` every time. **To reproduce** See my branch, [john-kurkowski/syrupy#default-extension-pythonpath](https://github.com/john-kurkowski/syrupy/compare/main..default-extension-pythonpath). In the final commit, https://github.com/john-kurkowski/syrupy/commit/ea9779371583253c03b0bdf47c09ca6f5526d909, switching from modifying `sys.path` to setting pytest's `--pythonpath` breaks 2/3 of the branch's test cases. **EDIT:** pytest's `pythonpath` an INI configuration option, not CLI. ```diff diff --git a/tests/integration/test_snapshot_option_extension.py b/tests/integration/test_snapshot_option_extension.py index de8e807..42b2eec 100644 --- a/tests/integration/test_snapshot_option_extension.py +++ b/tests/integration/test_snapshot_option_extension.py @@ -26,11 +26,11 @@ def testfile(testdir): return testdir -def test_snapshot_default_extension_option_success(monkeypatch, testfile): - monkeypatch.syspath_prepend(testfile.tmpdir) - +def test_snapshot_default_extension_option_success(testfile): result = testfile.runpytest( "-v", + "--pythonpath", + testfile.tmpdir, "--snapshot-update", "--snapshot-default-extension", "extension_file.MySingleFileExtension", @@ -63,11 +63,11 @@ def test_snapshot_default_extension_option_module_not_found(testfile): assert result.ret -def test_snapshot_default_extension_option_member_not_found(monkeypatch, testfile): - monkeypatch.syspath_prepend(testfile.tmpdir) - +def test_snapshot_default_extension_option_member_not_found(testfile): result = testfile.runpytest( "-v", + "--pythonpath", + testfile.tmpdir, "--snapshot-update", "--snapshot-default-extension", "extension_file.DoesNotExistExtension", ``` **Expected behavior** Tests in my branch should pass. **Environment:** - OS: macOS - Syrupy Version: 4.0.1 - Python Version: 3.11.1 **Workaround** Set `PYTHONPATH` prior to invoking the pytest CLI. ```sh PYTHONPATH=path/to/my/extensions/folder pytest --snapshot-default-extension some_module.SomeExtension ```
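The ordering problem behind this: an argparse `type=` callback runs while the command line is parsed, before pytest has applied the `pythonpath` ini option to `sys.path`, so an extension living only on that extra path cannot be imported yet. The patch drops the eager `type=` validation and resolves the extension lazily instead. A schematic comparison (not syrupy's actual code):

```python
import argparse

def import_extension(value: str) -> str:
    # Runs at argv-parsing time; sys.path additions made later (e.g. by
    # pytest's `pythonpath` ini handling) are not visible here yet.
    module_name, _, _member = value.rpartition(".")
    __import__(module_name)
    return value

buggy = argparse.ArgumentParser()
buggy.add_argument("--snapshot-default-extension", type=import_extension)

fixed = argparse.ArgumentParser()
# Store the raw string; resolve it with import_module() only when the
# extension is first needed, after pytest has extended sys.path.
fixed.add_argument("--snapshot-default-extension", default=None)
```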
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/integration/test_snapshot_option_extension.py::test_snapshot_default_extension_option_failure", "tests/integration/test_snapshot_option_extension_pythonpath.py::test_snapshot_default_extension_option_success", "tests/integration/test_snapshot_option_extension_pythonpath.py::test_snapshot_default_extension_option_module_not_found", "tests/integration/test_snapshot_option_extension_pythonpath.py::test_snapshot_default_extension_option_failure" ]
[ "tests/integration/test_snapshot_option_extension.py::test_snapshot_default_extension_option_success" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-04-06T23:57:55Z"
apache-2.0
tophat__syrupy-761
diff --git a/src/syrupy/extensions/single_file.py b/src/syrupy/extensions/single_file.py index 8e1a7c5..19b9838 100644 --- a/src/syrupy/extensions/single_file.py +++ b/src/syrupy/extensions/single_file.py @@ -82,9 +82,10 @@ class SingleFileSnapshotExtension(AbstractSyrupyExtension): ) -> "SnapshotCollection": file_ext_len = len(self._file_extension) + 1 if self._file_extension else 0 filename_wo_ext = snapshot_location[:-file_ext_len] + basename = Path(filename_wo_ext).parts[-1] snapshot_collection = SnapshotCollection(location=snapshot_location) - snapshot_collection.add(Snapshot(name=Path(filename_wo_ext).stem)) + snapshot_collection.add(Snapshot(name=basename)) return snapshot_collection def _read_snapshot_data_from_location(
tophat/syrupy
eb3183d3fc0da739d8909272a400fdaa722c2faa
diff --git a/tests/integration/test_single_file_multiple_extensions.py b/tests/integration/test_single_file_multiple_extensions.py index b93f287..bcee53d 100644 --- a/tests/integration/test_single_file_multiple_extensions.py +++ b/tests/integration/test_single_file_multiple_extensions.py @@ -38,3 +38,39 @@ def test_multiple_file_extensions(testdir): result.stdout.re_match_lines((r"1 snapshot passed\.")) assert "snapshots unused" not in result.stdout.str() assert result.ret == 0 + + +def test_class_style(testdir): + """ + Regression test for https://github.com/tophat/syrupy/issues/717 + """ + + testcase = """ + import pytest + from syrupy.extensions.json import JSONSnapshotExtension + + @pytest.fixture + def snapshot(snapshot): + return snapshot.use_extension(JSONSnapshotExtension) + + class TestFoo: + def test_foo(self, snapshot): + assert { 'key': 'value' } == snapshot + """ + + test_file: Path = testdir.makepyfile(test_file=testcase) + + result = testdir.runpytest("-v", "--snapshot-update") + result.stdout.re_match_lines((r"1 snapshot generated\.")) + assert "deleted" not in result.stdout.str() + assert result.ret == 0 + + snapshot_file = ( + Path(test_file).parent / "__snapshots__" / "test_file" / "TestFoo.test_foo.json" + ) + assert snapshot_file.exists() + + result = testdir.runpytest("-v") + result.stdout.re_match_lines((r"1 snapshot passed\.")) + assert "snapshots unused" not in result.stdout.str() + assert result.ret == 0
Syrupy is recognizing a snapshot as not being used (4.0.1) **Describe the bug** This is probably a regression in the latest release. I didn't observe the behavior in 4.0.0: 1. On a new test with no snapshot (using the `json` extension), I get the expected behaviour: > 1 snapshot failed > Snapshot 'TestFoo.test_foo' does not exist! 2. With the first `pytest --snapshot-update` (this is where the first signs of the bug appear): > 1 snapshot generated. 1 unused snapshot deleted. Deleted TestFoo (tests/foo/__snapshots__/test_foo/TestFoo.test_foo.json) Why does it output `Deleted ...`? That isn't the expected behaviour (though the deletion doesn't actually happen at this point). 3. Another follow-up when running `pytest`: > 1 snapshot passed. 1 snapshot unused. Re-run pytest with --snapshot-update to delete unused snapshots. Again, that isn't correct. It gets worse though: 4. `pytest --snapshot-update`: This time the deletion *will* happen, hence the next run of `pytest` will fail since it won't find a snapshot. # Env: Syrupy 4.0.1 Python 3.10 Going back to 4.0.0 solved the issue.
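Once again the culprit is `Path.stem`: a class-style test yields a snapshot named `TestFoo.test_foo`, and `.stem` treats `.test_foo` as a file suffix and strips it, so the collection is read back under the wrong name and flagged as unused. The fix takes the basename via `parts[-1]` instead:

```python
from pathlib import Path

filename_wo_ext = "__snapshots__/test_file/TestFoo.test_foo"

print(Path(filename_wo_ext).stem)       # TestFoo           (name mangled)
print(Path(filename_wo_ext).parts[-1])  # TestFoo.test_foo  (correct)
```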
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/integration/test_single_file_multiple_extensions.py::test_class_style" ]
[ "tests/integration/test_single_file_multiple_extensions.py::test_multiple_file_extensions" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2023-06-19T03:43:28Z"
apache-2.0
tophat__syrupy-769
diff --git a/src/syrupy/assertion.py b/src/syrupy/assertion.py index 35c301d..328afb4 100644 --- a/src/syrupy/assertion.py +++ b/src/syrupy/assertion.py @@ -45,6 +45,7 @@ class AssertionResult: updated: bool success: bool exception: Optional[Exception] + test_location: "PyTestLocation" @property def final_data(self) -> Optional["SerializedData"]: @@ -303,14 +304,15 @@ class SnapshotAssertion: snapshot_updated = matches is False and assertion_success self._execution_name_index[self.index] = self._executions self._execution_results[self._executions] = AssertionResult( + asserted_data=serialized_data, + created=snapshot_created, + exception=assertion_exception, + recalled_data=snapshot_data, snapshot_location=snapshot_location, snapshot_name=snapshot_name, - recalled_data=snapshot_data, - asserted_data=serialized_data, success=assertion_success, - created=snapshot_created, + test_location=self.test_location, updated=snapshot_updated, - exception=assertion_exception, ) self._executions += 1 self._post_assert() diff --git a/src/syrupy/location.py b/src/syrupy/location.py index 3d8fe2d..0f955bb 100644 --- a/src/syrupy/location.py +++ b/src/syrupy/location.py @@ -15,7 +15,7 @@ from syrupy.constants import PYTEST_NODE_SEP @dataclass class PyTestLocation: - _node: "pytest.Item" + item: "pytest.Item" nodename: Optional[str] = field(init=False) testname: str = field(init=False) methodname: str = field(init=False) @@ -28,16 +28,16 @@ class PyTestLocation: self.__attrs_post_init_def__() def __attrs_post_init_def__(self) -> None: - node_path: Path = getattr(self._node, "path") # noqa: B009 + node_path: Path = getattr(self.item, "path") # noqa: B009 self.filepath = str(node_path.absolute()) - obj = getattr(self._node, "obj") # noqa: B009 + obj = getattr(self.item, "obj") # noqa: B009 self.modulename = obj.__module__ self.methodname = obj.__name__ - self.nodename = getattr(self._node, "name", None) + self.nodename = getattr(self.item, "name", None) self.testname = self.nodename or self.methodname def __attrs_post_init_doc__(self) -> None: - doctest = getattr(self._node, "dtest") # noqa: B009 + doctest = getattr(self.item, "dtest") # noqa: B009 self.filepath = doctest.filename test_relfile, test_node = self.nodeid.split(PYTEST_NODE_SEP) test_relpath = Path(test_relfile) @@ -64,7 +64,7 @@ class PyTestLocation: :raises: `AttributeError` if node has no node id :return: test node id """ - return str(getattr(self._node, "nodeid")) # noqa: B009 + return str(getattr(self.item, "nodeid")) # noqa: B009 @property def basename(self) -> str: @@ -78,7 +78,7 @@ class PyTestLocation: @property def is_doctest(self) -> bool: - return self.__is_doctest(self._node) + return self.__is_doctest(self.item) def __is_doctest(self, node: "pytest.Item") -> bool: return hasattr(node, "dtest") diff --git a/src/syrupy/report.py b/src/syrupy/report.py index 4088be4..5eaa4b6 100644 --- a/src/syrupy/report.py +++ b/src/syrupy/report.py @@ -22,6 +22,8 @@ from typing import ( Set, ) +from _pytest.skipping import xfailed_key + from .constants import PYTEST_NODE_SEP from .data import ( Snapshot, @@ -70,6 +72,7 @@ class SnapshotReport: used: "SnapshotCollections" = field(default_factory=SnapshotCollections) _provided_test_paths: Dict[str, List[str]] = field(default_factory=dict) _keyword_expressions: Set["Expression"] = field(default_factory=set) + _num_xfails: int = field(default=0) @property def update_snapshots(self) -> bool: @@ -89,6 +92,14 @@ class SnapshotReport: getattr(item, "nodeid"): item for item in self.collected_items # noqa: B009 
} + def _has_xfail(self, item: "pytest.Item") -> bool: + # xfailed_key is 'private'. I'm open to a better way to do this: + if xfailed_key in item.stash: + result = item.stash[xfailed_key] + if result: + return result.run + return False + def __post_init__(self) -> None: self.__parse_invocation_args() @@ -113,6 +124,7 @@ class SnapshotReport: Snapshot(name=result.snapshot_name, data=result.final_data) ) self.used.update(snapshot_collection) + if result.created: self.created.update(snapshot_collection) elif result.updated: @@ -120,6 +132,9 @@ class SnapshotReport: elif result.success: self.matched.update(snapshot_collection) else: + has_xfail = self._has_xfail(item=result.test_location.item) + if has_xfail: + self._num_xfails += 1 self.failed.update(snapshot_collection) def __parse_invocation_args(self) -> None: @@ -161,7 +176,7 @@ class SnapshotReport: def num_created(self) -> int: return self._count_snapshots(self.created) - @property + @cached_property def num_failed(self) -> int: return self._count_snapshots(self.failed) @@ -256,14 +271,22 @@ class SnapshotReport: ``` """ summary_lines: List[str] = [] - if self.num_failed: + if self.num_failed and self._num_xfails < self.num_failed: summary_lines.append( ngettext( "{} snapshot failed.", "{} snapshots failed.", - self.num_failed, - ).format(error_style(self.num_failed)) + self.num_failed - self._num_xfails, + ).format(error_style(self.num_failed - self._num_xfails)), ) + if self._num_xfails: + summary_lines.append( + ngettext( + "{} snapshot xfailed.", + "{} snapshots xfailed.", + self._num_xfails, + ).format(warning_style(self._num_xfails)), + ) if self.num_matched: summary_lines.append( ngettext(
tophat/syrupy
6a93c87229b2091d16a4190bd5f6a8c36a71ecad
diff --git a/tests/integration/test_xfail.py b/tests/integration/test_xfail.py new file mode 100644 index 0000000..5113717 --- /dev/null +++ b/tests/integration/test_xfail.py @@ -0,0 +1,54 @@ +def test_no_failure_printed_if_all_failures_xfailed(testdir): + testdir.makepyfile( + test_file=( + """ + import pytest + + @pytest.mark.xfail(reason="Failure expected.") + def test_a(snapshot): + assert snapshot == 'does-not-exist' + """ + ) + ) + result = testdir.runpytest("-v") + result.stdout.no_re_match_line((r".*snapshot failed*")) + assert result.ret == 0 + + +def test_failures_printed_if_only_some_failures_xfailed(testdir): + testdir.makepyfile( + test_file=( + """ + import pytest + + @pytest.mark.xfail(reason="Failure expected.") + def test_a(snapshot): + assert snapshot == 'does-not-exist' + + def test_b(snapshot): + assert snapshot == 'other' + """ + ) + ) + result = testdir.runpytest("-v") + result.stdout.re_match_lines((r".*1 snapshot failed*")) + result.stdout.re_match_lines((r".*1 snapshot xfailed*")) + assert result.ret == 1 + + +def test_failure_printed_if_xfail_does_not_run(testdir): + testdir.makepyfile( + test_file=( + """ + import pytest + + @pytest.mark.xfail(False, reason="Failure expected.") + def test_a(snapshot): + assert snapshot == 'does-not-exist' + """ + ) + ) + result = testdir.runpytest("-v") + result.stdout.re_match_lines((r".*1 snapshot failed*")) + result.stdout.no_re_match_line((r".*1 snapshot xfailed*")) + assert result.ret == 1 diff --git a/tests/syrupy/extensions/amber/test_amber_snapshot_diff.py b/tests/syrupy/extensions/amber/test_amber_snapshot_diff.py index 71cef86..9dcff61 100644 --- a/tests/syrupy/extensions/amber/test_amber_snapshot_diff.py +++ b/tests/syrupy/extensions/amber/test_amber_snapshot_diff.py @@ -51,9 +51,9 @@ def test_snapshot_diff_id(snapshot): assert dictCase3 == snapshot(name="case3", diff="large snapshot") [email protected](reason="Asserting snapshot does not exist") def test_snapshot_no_diff_raises_exception(snapshot): my_dict = { "field_0": "value_0", } - with pytest.raises(AssertionError, match="SnapshotDoesNotExist"): - assert my_dict == snapshot(diff="does not exist index") + assert my_dict == snapshot(diff="does not exist index")
Tests marked `xfail` are reported as failures Hey there, thanks for your work on `syrupy`! I'm wondering if the fact that XFAIL tests are reported as failures is an intended design decision, a bug, or something you haven't contemplated yet. The full context is from https://github.com/Textualize/textual/issues/2282 but, in short, I have a snapshot test that is marked with `xfail`: ![](https://user-images.githubusercontent.com/5621605/231783110-cb2fd213-8fbe-4746-b9fe-8e16368c256a.png) However, at the end, I get a report saying that one snapshot test failed: ![](https://user-images.githubusercontent.com/5621605/231783032-733059ee-9ddd-429d-bba6-34bd39facfcc.png) I expected to see a yellow warning saying that one snapshot test gave an expected failure instead of the red warning saying that the test failed, especially taking into account the confusing contrast with pytest, which happily reports that the tests passed.
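A minimal repro, distilled from the test this fix adds (the `snapshot` fixture comes from syrupy): pytest itself reports the test as XFAIL, yet before this change syrupy's end-of-run summary still counted the snapshot as failed.

```python
import pytest


# No snapshot was ever recorded for this test, so the assertion fails --
# which is exactly what the xfail marker anticipates.
@pytest.mark.xfail(reason="Failure expected.")
def test_a(snapshot):
    assert snapshot == "does-not-exist"
```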
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/integration/test_xfail.py::test_no_failure_printed_if_all_failures_xfailed", "tests/integration/test_xfail.py::test_failures_printed_if_only_some_failures_xfailed" ]
[ "tests/integration/test_xfail.py::test_failure_printed_if_xfail_does_not_run", "tests/syrupy/extensions/amber/test_amber_snapshot_diff.py::test_snapshot_diff", "tests/syrupy/extensions/amber/test_amber_snapshot_diff.py::test_snapshot_diff_id" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2023-07-11T21:02:14Z"
apache-2.0
tornadoweb__tornado-2393
diff --git a/tornado/autoreload.py b/tornado/autoreload.py index 2f911270..7d69474a 100644 --- a/tornado/autoreload.py +++ b/tornado/autoreload.py @@ -107,6 +107,9 @@ _watched_files = set() _reload_hooks = [] _reload_attempted = False _io_loops = weakref.WeakKeyDictionary() # type: ignore +_autoreload_is_main = False +_original_argv = None +_original_spec = None def start(check_time=500): @@ -214,11 +217,15 @@ def _reload(): # __spec__ is not available (Python < 3.4), check instead if # sys.path[0] is an empty string and add the current directory to # $PYTHONPATH. - spec = getattr(sys.modules['__main__'], '__spec__', None) - if spec: - argv = ['-m', spec.name] + sys.argv[1:] + if _autoreload_is_main: + spec = _original_spec + argv = _original_argv else: + spec = getattr(sys.modules['__main__'], '__spec__', None) argv = sys.argv + if spec: + argv = ['-m', spec.name] + argv[1:] + else: path_prefix = '.' + os.pathsep if (sys.path[0] == '' and not os.environ.get("PYTHONPATH", "").startswith(path_prefix)): @@ -226,7 +233,7 @@ def _reload(): os.environ.get("PYTHONPATH", "")) if not _has_execv: subprocess.Popen([sys.executable] + argv) - sys.exit(0) + os._exit(0) else: try: os.execv(sys.executable, [sys.executable] + argv) @@ -269,7 +276,17 @@ def main(): can catch import-time problems like syntax errors that would otherwise prevent the script from reaching its call to `wait`. """ + # Remember that we were launched with autoreload as main. + # The main module can be tricky; set the variables both in our globals + # (which may be __main__) and the real importable version. + import tornado.autoreload + global _autoreload_is_main + global _original_argv, _original_spec + tornado.autoreload._autoreload_is_main = _autoreload_is_main = True original_argv = sys.argv + tornado.autoreload._original_argv = _original_argv = original_argv + original_spec = getattr(sys.modules['__main__'], '__spec__', None) + tornado.autoreload._original_spec = _original_spec = original_spec sys.argv = sys.argv[:] if len(sys.argv) >= 3 and sys.argv[1] == "-m": mode = "module"
tornadoweb/tornado
eb487cac3d829292ecca6e5124b1da5ae6bba407
diff --git a/tornado/test/autoreload_test.py b/tornado/test/autoreload_test.py index 6a9729db..1ea53167 100644 --- a/tornado/test/autoreload_test.py +++ b/tornado/test/autoreload_test.py @@ -1,14 +1,19 @@ from __future__ import absolute_import, division, print_function import os +import shutil import subprocess from subprocess import Popen import sys from tempfile import mkdtemp +import time from tornado.test.util import unittest -MAIN = """\ +class AutoreloadTest(unittest.TestCase): + + def test_reload_module(self): + main = """\ import os import sys @@ -24,15 +29,13 @@ if 'TESTAPP_STARTED' not in os.environ: autoreload._reload() """ - -class AutoreloadTest(unittest.TestCase): - def test_reload_module(self): # Create temporary test application path = mkdtemp() + self.addCleanup(shutil.rmtree, path) os.mkdir(os.path.join(path, 'testapp')) open(os.path.join(path, 'testapp/__init__.py'), 'w').close() with open(os.path.join(path, 'testapp/__main__.py'), 'w') as f: - f.write(MAIN) + f.write(main) # Make sure the tornado module under test is available to the test # application @@ -46,3 +49,64 @@ class AutoreloadTest(unittest.TestCase): universal_newlines=True) out = p.communicate()[0] self.assertEqual(out, 'Starting\nStarting\n') + + def test_reload_wrapper_preservation(self): + # This test verifies that when `python -m tornado.autoreload` + # is used on an application that also has an internal + # autoreload, the reload wrapper is preserved on restart. + main = """\ +import os +import sys + +# This import will fail if path is not set up correctly +import testapp + +if 'tornado.autoreload' not in sys.modules: + raise Exception('started without autoreload wrapper') + +import tornado.autoreload + +print('Starting') +sys.stdout.flush() +if 'TESTAPP_STARTED' not in os.environ: + os.environ['TESTAPP_STARTED'] = '1' + # Simulate an internal autoreload (one not caused + # by the wrapper). + tornado.autoreload._reload() +else: + # Exit directly so autoreload doesn't catch it. + os._exit(0) +""" + + # Create temporary test application + path = mkdtemp() + os.mkdir(os.path.join(path, 'testapp')) + self.addCleanup(shutil.rmtree, path) + init_file = os.path.join(path, 'testapp', '__init__.py') + open(init_file, 'w').close() + main_file = os.path.join(path, 'testapp', '__main__.py') + with open(main_file, 'w') as f: + f.write(main) + + # Make sure the tornado module under test is available to the test + # application + pythonpath = os.getcwd() + if 'PYTHONPATH' in os.environ: + pythonpath += os.pathsep + os.environ['PYTHONPATH'] + + autoreload_proc = Popen( + [sys.executable, '-m', 'tornado.autoreload', '-m', 'testapp'], + stdout=subprocess.PIPE, cwd=path, + env=dict(os.environ, PYTHONPATH=pythonpath), + universal_newlines=True) + + for i in range(20): + if autoreload_proc.poll() is not None: + break + time.sleep(0.1) + else: + autoreload_proc.kill() + raise Exception("subprocess failed to terminate") + + out = autoreload_proc.communicate()[0] + self.assertEqual(out, 'Starting\n' * 2)
autoreload: Fix argv preservation `autoreload` currently has a wrapper mode (e.g. `python -m tornado.autoreload -m tornado.test`) for scripts, and an in-process mode (enabled by `Application(..., debug=True)`). It's useful to combine these, since the wrapper can catch syntax errors that cause the process to abort before entering its IOLoop. However, this doesn't work as well as it should, because the `main` wrapper only restores `sys.argv` if the process exits, meaning the `-m tornado.autoreload` flags are lost if the inner autoreload fires. The original argv needs to be stored in a global when `autoreload` is `__main__`, so that it can be used in `_reload()`.
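A minimal sketch of the combined setup described above; the file name and port are illustrative, not from the original report. Launched as `python -m tornado.autoreload app.py`, the wrapper catches import-time errors, while `debug=True` enables the in-process reloader whose firing used to drop the wrapper's argv.

```python
# app.py -- run under the wrapper: python -m tornado.autoreload app.py
import tornado.ioloop
import tornado.web

# debug=True turns on the in-process autoreloader.
app = tornado.web.Application([], debug=True)

if __name__ == "__main__":
    app.listen(8888)  # port chosen arbitrarily for this sketch
    tornado.ioloop.IOLoop.current().start()
```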
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tornado/test/autoreload_test.py::AutoreloadTest::test_reload_wrapper_preservation" ]
[ "tornado/test/autoreload_test.py::AutoreloadTest::test_reload_module" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2018-05-19T23:59:17Z"
apache-2.0
torrua__keyboa-15
diff --git a/keyboa/button.py b/keyboa/button.py index 6280ba8..9b70703 100644 --- a/keyboa/button.py +++ b/keyboa/button.py @@ -46,6 +46,9 @@ class Button(ButtonCheck): back_marker: CallbackDataMarker = str() copy_text_to_callback: Optional[bool] = None + def __call__(self, *args, **kwargs): + return self.generate() + def generate(self) -> InlineKeyboardButton: """ This function creates an InlineKeyboardButton object from various data types,
torrua/keyboa
f70ec7162e4352726922d60088f2bce9e88fc96f
diff --git a/tests/test_base.py b/tests/test_base.py index 78dc2cb..1c77e28 100644 --- a/tests/test_base.py +++ b/tests/test_base.py @@ -17,7 +17,7 @@ def test_items_is_none_or_empty(): :return: """ with pytest.raises(ValueError) as _: - Keyboa(items=list()) + Keyboa(items=[]) with pytest.raises(ValueError) as _: Keyboa(items=None) diff --git a/tests/test_button.py b/tests/test_button.py index b0a8ad1..435183a 100644 --- a/tests/test_button.py +++ b/tests/test_button.py @@ -37,7 +37,7 @@ UNACCEPTABLE_BUTTON_SOURCE_TYPES = ( {2, "a"}, {"a", 2}, [2, "a"], - (2, dict()), + (2, {}), ["a", 2], (None, 2), (None, None), @@ -284,3 +284,8 @@ def test_none_as_markers(): def test_button_property(): btn = Button(button_data="button_text", copy_text_to_callback=True).button assert isinstance(btn, InlineKeyboardButton) + + +def test_button_call_method(): + btn = Button(button_data="button_text", copy_text_to_callback=True) + assert isinstance(btn(), InlineKeyboardButton) diff --git a/tests/test_keyboard.py b/tests/test_keyboard.py index 41627fb..0709297 100644 --- a/tests/test_keyboard.py +++ b/tests/test_keyboard.py @@ -667,3 +667,22 @@ def test_kb_with_items_in_row_and_last_buttons(): items_in_row=2, ).keyboard assert len(keyboa.keyboard) == 4 + + +def test_kb_is_callable(): + keyboa = Keyboa( + items=[ + (1, "a"), + (2, "b"), + (3, "c"), + (4, "d"), + (5, "e"), + (6, "f"), + ], + back_marker="_is_callable", + items_in_row=2, + ) + assert type(keyboa.keyboard) == type(keyboa()) + assert keyboa.keyboard.to_json() == keyboa().to_json() == keyboa.slice().to_json() + assert keyboa.slice(slice(3)).to_json() == keyboa(slice(3)).to_json() + assert keyboa.slice(slice(2, 4, 2)).to_json() == keyboa(slice(2, 4, 2)).to_json()
(PTC-W0019) Consider using literal syntax to create the data structure ## Description Using the literal syntax can give minor performance bumps compared to using function calls to create `dict`, `list` and `tuple`. ## Occurrences There are 2 occurrences of this issue in the repository. See all occurrences on DeepSource → [deepsource.io/gh/torrua/keyboa/issue/PTC-W0019/occurrences/](https://deepsource.io/gh/torrua/keyboa/issue/PTC-W0019/occurrences/)
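For illustration, the rewrite the lint asks for -- these mirror the two occurrences fixed in this repository's tests (`Keyboa(items=list())` and `(2, dict())`):

```python
# Constructor calls: a global name lookup plus a function call each.
items = list()
pair = (2, dict())

# Literal syntax: compiled straight to BUILD_LIST / BUILD_MAP bytecode.
items = []
pair = (2, {})
```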
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_button.py::test_button_call_method" ]
[ "tests/test_base.py::test_items_is_none_or_empty", "tests/test_base.py::test_copy_text_to_callback_is_not_bool", "tests/test_base.py::test_number_of_items_out_of_limits", "tests/test_base.py::test_number_of_items_in_row_out_of_limits", "tests/test_button.py::test_acceptable_button_source_types[2_0]", "tests/test_button.py::test_acceptable_button_source_types[a]", "tests/test_button.py::test_acceptable_button_source_types[2_1]", "tests/test_button.py::test_acceptable_button_source_types[button_data3]", "tests/test_button.py::test_acceptable_button_source_types[button_data4]", "tests/test_button.py::test_acceptable_button_source_types[button_data5]", "tests/test_button.py::test_acceptable_button_source_types[button_data6]", "tests/test_button.py::test_acceptable_button_source_types[button_data7]", "tests/test_button.py::test_unacceptable_button_source_types_without_callback[2_0]", "tests/test_button.py::test_unacceptable_button_source_types_without_callback[a]", "tests/test_button.py::test_unacceptable_button_source_types_without_callback[2_1]", "tests/test_button.py::test_unacceptable_button_source_types_without_callback[button_data3]", "tests/test_button.py::test_unacceptable_button_source_types[button_data0]", "tests/test_button.py::test_unacceptable_button_source_types[button_data1]", "tests/test_button.py::test_unacceptable_button_source_types[button_data2]", "tests/test_button.py::test_unacceptable_button_source_types[button_data3]", "tests/test_button.py::test_unacceptable_button_source_types[button_data4]", "tests/test_button.py::test_unacceptable_button_source_types[button_data5]", "tests/test_button.py::test_unacceptable_button_source_types[button_data6]", "tests/test_button.py::test_unacceptable_button_source_types[None]", "tests/test_button.py::test_unacceptable_front_marker_type", "tests/test_button.py::test_unacceptable_back_marker_type", "tests/test_button.py::test_unacceptable_callback_data_type", "tests/test_button.py::test_unacceptable_text_type[button_data0]", "tests/test_button.py::test_unacceptable_text_type[button_data1]", "tests/test_button.py::test_unacceptable_text_type[button_data2]", "tests/test_button.py::test_create_button_from_dict_tuple_list[button_data0]", "tests/test_button.py::test_create_button_from_dict_tuple_list[button_data1]", "tests/test_button.py::test_create_button_from_int_or_str_with_copy_option[12345_0]", "tests/test_button.py::test_create_button_from_int_or_str_with_copy_option[12345_1]", "tests/test_button.py::test_create_button_from_int_or_str_without_copy_option[12345_0]", "tests/test_button.py::test_create_button_from_int_or_str_without_copy_option[12345_1]", "tests/test_button.py::test_create_button_from_int_or_str_without_callback[12345_0]", "tests/test_button.py::test_create_button_from_int_or_str_without_callback[12345_1]", "tests/test_button.py::test_create_button_from_button", "tests/test_button.py::test_empty_text", "tests/test_button.py::test_empty_callback_data", "tests/test_button.py::test_big_callback_data", "tests/test_button.py::test_none_as_markers", "tests/test_button.py::test_button_property", "tests/test_keyboard.py::test_keyboards_is_none", "tests/test_keyboard.py::test_keyboards_is_single_keyboard", "tests/test_keyboard.py::test_keyboards_is_multi_keyboards", "tests/test_keyboard.py::test_not_keyboard_for_merge", "tests/test_keyboard.py::test_merge_two_keyboard_into_one_out_of_limits", "tests/test_keyboard.py::test_pass_string_with_copy_to_callback", "tests/test_keyboard.py::test_pass_string_without_copy_to_callback", 
"tests/test_keyboard.py::test_pass_one_button", "tests/test_keyboard.py::test_pass_one_item_dict_with_text_field", "tests/test_keyboard.py::test_pass_one_item_dict_without_text_field", "tests/test_keyboard.py::test_pass_multi_item_dict_without_text_field", "tests/test_keyboard.py::test_pass_one_row", "tests/test_keyboard.py::test_pass_structure", "tests/test_keyboard.py::test_auto_keyboa_maker_alignment", "tests/test_keyboard.py::test_auto_keyboa_maker_items_in_row", "tests/test_keyboard.py::test_slice", "tests/test_keyboard.py::test_minimal_kb_with_copy_text_to_callback_specified_none", "tests/test_keyboard.py::test_minimal_kb_with_items_out_of_limits", "tests/test_keyboard.py::test_minimal_kb_with_copy_text_to_callback_specified_true", "tests/test_keyboard.py::test_minimal_kb_with_copy_text_to_callback_specified_false", "tests/test_keyboard.py::test_minimal_kb_with_fixed_items_in_row[2]", "tests/test_keyboard.py::test_minimal_kb_with_fixed_items_in_row[3]", "tests/test_keyboard.py::test_minimal_kb_with_fixed_items_in_row[4]", "tests/test_keyboard.py::test_minimal_kb_with_fixed_items_in_row[6]", "tests/test_keyboard.py::test_minimal_kb_with_front_marker", "tests/test_keyboard.py::test_minimal_kb_with_front_marker_and_copy_text_to_callback", "tests/test_keyboard.py::test_minimal_kb_with_back_marker", "tests/test_keyboard.py::test_minimal_kb_with_back_marker_out_of_limits", "tests/test_keyboard.py::test_minimal_kb_with_back_marker_out_of_limits_with_text", "tests/test_keyboard.py::test_minimal_kb_with_empty_back_marker", "tests/test_keyboard.py::test_minimal_kb_with_back_marker_and_copy_text_to_callback", "tests/test_keyboard.py::test_minimal_kb_with_front_and_back_markers", "tests/test_keyboard.py::test_minimal_kb_with_front_and_back_markers_and_copy_text_to_callback", "tests/test_keyboard.py::test_minimal_kb_with_front_and_back_markers_and_copy_text_to_callback_is_false", "tests/test_keyboard.py::test_minimal_kb_with_alignment_true", "tests/test_keyboard.py::test_minimal_kb_with_items_in_row", "tests/test_keyboard.py::test_minimal_kb_with_items_in_row_out_of_limits", "tests/test_keyboard.py::test_minimal_kb_with_alignment_true_slice", "tests/test_keyboard.py::test_minimal_kb_with_alignment_true_and_reversed_alignment_true", "tests/test_keyboard.py::test_minimal_kb_with_alignment_specified", "tests/test_keyboard.py::test_minimal_kb_with_alignment_specified_out_of_limits", "tests/test_keyboard.py::test_minimal_kb_with_alignment_specified_and_reversed_alignment_true", "tests/test_keyboard.py::test_minimal_kb_with_reversed_alignment_true", "tests/test_keyboard.py::test_minimal_kb_with_all_parameters_specified_reversed_range_true", "tests/test_keyboard.py::test_minimal_kb_with_all_parameters_specified_reversed_range_false", "tests/test_keyboard.py::test_structured_kb", "tests/test_keyboard.py::test_structured_kb_with_alignment", "tests/test_keyboard.py::test_structured_kb_with_items_in_row", "tests/test_keyboard.py::test_structured_kb_with_front_marker", "tests/test_keyboard.py::test_structured_kb_with_front_marker_no_copy_text_to_callback", "tests/test_keyboard.py::test_kb_from_tuples", "tests/test_keyboard.py::test_kb_from_tuples_with_front_marker", "tests/test_keyboard.py::test_kb_from_tuples_with_back_marker_and_items_in_row", "tests/test_keyboard.py::test_kb_with_items_in_row_and_last_buttons", "tests/test_keyboard.py::test_kb_is_callable" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
"2021-10-01T04:11:39Z"
mit
tortoise__tortoise-orm-1104
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 2ffaf9d..47116a8 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -10,6 +10,9 @@ Changelog ==== 0.19.1 ------ +Added +^^^^^ +- Added `Postgres` partial indexes support. (#1103) Fixed ^^^^^ - `TimeField` for `MySQL` will return `datetime.timedelta` object instead of `datetime.time` object. diff --git a/tortoise/contrib/postgres/indexes.py b/tortoise/contrib/postgres/indexes.py index 0992357..c536464 100644 --- a/tortoise/contrib/postgres/indexes.py +++ b/tortoise/contrib/postgres/indexes.py @@ -1,13 +1,31 @@ -from abc import ABCMeta +from typing import Optional, Set + +from pypika.terms import Term, ValueWrapper from tortoise.indexes import Index -class PostgreSQLIndex(Index, metaclass=ABCMeta): +class PostgreSQLIndex(Index): INDEX_CREATE_TEMPLATE = ( - "CREATE INDEX {exists}{index_name} ON {table_name} USING{index_type}({fields});" + "CREATE INDEX {exists}{index_name} ON {table_name} USING{index_type}({fields}){extra};" ) + def __init__( + self, + *expressions: Term, + fields: Optional[Set[str]] = None, + name: Optional[str] = None, + condition: Optional[dict] = None, + ): + super().__init__(*expressions, fields=fields, name=name) + if condition: + cond = " WHERE " + items = [] + for k, v in condition.items(): + items.append(f"{k} = {ValueWrapper(v)}") + cond += " AND ".join(items) + self.extra = cond + class BloomIndex(PostgreSQLIndex): INDEX_TYPE = "BLOOM"
tortoise/tortoise-orm
e023166781bed2c485b43d1e862c455fa7c3e872
diff --git a/tests/schema/models_postgres_index.py b/tests/schema/models_postgres_index.py index edc7ab2..e2d0fa9 100644 --- a/tests/schema/models_postgres_index.py +++ b/tests/schema/models_postgres_index.py @@ -6,6 +6,7 @@ from tortoise.contrib.postgres.indexes import ( GinIndex, GistIndex, HashIndex, + PostgreSQLIndex, SpGistIndex, ) @@ -17,6 +18,7 @@ class Index(Model): gist = TSVectorField() sp_gist = fields.CharField(max_length=200) hash = fields.CharField(max_length=200) + partial = fields.CharField(max_length=200) class Meta: indexes = [ @@ -26,4 +28,5 @@ class Index(Model): GistIndex(fields={"gist"}), SpGistIndex(fields={"sp_gist"}), HashIndex(fields={"hash"}), + PostgreSQLIndex(fields={"partial"}, condition={"id": 1}), ] diff --git a/tests/schema/test_generate_schema.py b/tests/schema/test_generate_schema.py index 141cf55..76444c2 100644 --- a/tests/schema/test_generate_schema.py +++ b/tests/schema/test_generate_schema.py @@ -1061,19 +1061,22 @@ COMMENT ON TABLE "teamevents" IS 'How participants relate'; "gin" TSVECTOR NOT NULL, "gist" TSVECTOR NOT NULL, "sp_gist" VARCHAR(200) NOT NULL, - "hash" VARCHAR(200) NOT NULL + "hash" VARCHAR(200) NOT NULL, + "partial" VARCHAR(200) NOT NULL ); CREATE INDEX "idx_index_bloom_280137" ON "index" USING BLOOM ("bloom"); CREATE INDEX "idx_index_brin_a54a00" ON "index" USING BRIN ("brin"); CREATE INDEX "idx_index_gin_a403ee" ON "index" USING GIN ("gin"); CREATE INDEX "idx_index_gist_c807bf" ON "index" USING GIST ("gist"); CREATE INDEX "idx_index_sp_gist_2c0bad" ON "index" USING SPGIST ("sp_gist"); -CREATE INDEX "idx_index_hash_cfe6b5" ON "index" USING HASH ("hash");""", +CREATE INDEX "idx_index_hash_cfe6b5" ON "index" USING HASH ("hash"); +CREATE INDEX "idx_index_partial_c5be6a" ON "index" USING ("partial") WHERE id = 1;""", ) async def test_index_safe(self): await self.init_for("tests.schema.models_postgres_index") sql = get_schema_sql(connections.get("default"), safe=True) + print(sql) self.assertEqual( sql, """CREATE TABLE IF NOT EXISTS "index" ( @@ -1083,14 +1086,16 @@ CREATE INDEX "idx_index_hash_cfe6b5" ON "index" USING HASH ("hash");""", "gin" TSVECTOR NOT NULL, "gist" TSVECTOR NOT NULL, "sp_gist" VARCHAR(200) NOT NULL, - "hash" VARCHAR(200) NOT NULL + "hash" VARCHAR(200) NOT NULL, + "partial" VARCHAR(200) NOT NULL ); CREATE INDEX IF NOT EXISTS "idx_index_bloom_280137" ON "index" USING BLOOM ("bloom"); CREATE INDEX IF NOT EXISTS "idx_index_brin_a54a00" ON "index" USING BRIN ("brin"); CREATE INDEX IF NOT EXISTS "idx_index_gin_a403ee" ON "index" USING GIN ("gin"); CREATE INDEX IF NOT EXISTS "idx_index_gist_c807bf" ON "index" USING GIST ("gist"); CREATE INDEX IF NOT EXISTS "idx_index_sp_gist_2c0bad" ON "index" USING SPGIST ("sp_gist"); -CREATE INDEX IF NOT EXISTS "idx_index_hash_cfe6b5" ON "index" USING HASH ("hash");""", +CREATE INDEX IF NOT EXISTS "idx_index_hash_cfe6b5" ON "index" USING HASH ("hash"); +CREATE INDEX IF NOT EXISTS "idx_index_partial_c5be6a" ON "index" USING ("partial") WHERE id = 1;""", ) async def test_m2m_no_auto_create(self):
Postgres partial indexes support Are there any plans for supporting Postgres [partial indexes](https://www.postgresql.org/docs/current/indexes-partial.html)? Especially a unique one. It would be really helpful.
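The shape the feature took, as exercised by the test added with this change (the import paths shown are my best guess at the canonical ones): a `condition` dict on `PostgreSQLIndex` that is rendered as a `WHERE` clause.

```python
from tortoise import fields
from tortoise.contrib.postgres.indexes import PostgreSQLIndex
from tortoise.models import Model


class Index(Model):
    partial = fields.CharField(max_length=200)

    class Meta:
        # Renders as: CREATE INDEX ... ON "index" USING ("partial") WHERE id = 1;
        indexes = [PostgreSQLIndex(fields={"partial"}, condition={"id": 1})]
```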
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/schema/test_generate_schema.py::TestGenerateSchema::test_create_index", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_cyclic", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_fk_bad_model_name", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_fk_bad_null", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_fk_bad_on_delete", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_m2m_bad_model_name", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_m2m_no_auto_create", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_minrelation", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_noid", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_o2o_bad_null", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_o2o_bad_on_delete", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_safe_generation", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_schema", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_schema_no_db_constraint", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_schema_safe", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_table_and_row_comment_generation", "tests/schema/test_generate_schema.py::TestGenerateSchema::test_unsafe_generation" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2022-04-13T02:03:30Z"
apache-2.0
tortoise__tortoise-orm-1123
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4cf9422..a2bab96 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -18,6 +18,7 @@ Added Fixed ^^^^^ - `TimeField` for `MySQL` will return `datetime.timedelta` object instead of `datetime.time` object. +- Fix on conflict do nothing. (#1122) 0.19.0 ------ diff --git a/tortoise/backends/base/executor.py b/tortoise/backends/base/executor.py index 9dc7590..3cfdec0 100644 --- a/tortoise/backends/base/executor.py +++ b/tortoise/backends/base/executor.py @@ -208,7 +208,7 @@ class BaseExecutor: .insert(*[self.parameter(i) for i in range(len(columns))]) ) if ignore_conflicts: - query = query.do_nothing() + query = query.on_conflict().do_nothing() return query async def _process_insert_result(self, instance: "Model", results: Any) -> None: diff --git a/tortoise/backends/base_postgres/executor.py b/tortoise/backends/base_postgres/executor.py index 39dab14..db34a37 100644 --- a/tortoise/backends/base_postgres/executor.py +++ b/tortoise/backends/base_postgres/executor.py @@ -46,7 +46,7 @@ class BasePostgresExecutor(BaseExecutor): if generated_fields: query = query.returning(*generated_fields) if ignore_conflicts: - query = query.do_nothing() + query = query.on_conflict().do_nothing() return query async def _process_insert_result(self, instance: Model, results: Optional[dict]) -> None:
tortoise/tortoise-orm
aa3d51126065f352e21f7e1531b09547e54aee97
diff --git a/tests/test_bulk.py b/tests/test_bulk.py index 0af5eea..a0875db 100644 --- a/tests/test_bulk.py +++ b/tests/test_bulk.py @@ -132,6 +132,7 @@ class TestBulk(test.TruncationTestCase): async def test_bulk_create_ignore_conflicts(self): name1 = UniqueName(name="name1") name2 = UniqueName(name="name2") + await UniqueName.bulk_create([name1, name2]) await UniqueName.bulk_create([name1, name2], ignore_conflicts=True) with self.assertRaises(IntegrityError): await UniqueName.bulk_create([name1, name2])
bulk_create ignore_conflicts param does not change query **Describe the bug** The `ignore_conflicts` parameter of the `bulk_create` function does not change the SQL query: it does not add ON CONFLICT DO NOTHING. **To Reproduce** Using version 0.19.0 and the postgres executor: ``` print(SomeModel.bulk_create(objects=objects, ignore_conflicts=True).sql()) ``` **Expected behavior** "ON CONFLICT DO NOTHING" is appended at the end of the insert query.
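Behaviour after the fix, sketched from the updated test (the model shape is an assumption matching the suite's `UniqueName`; an async context is required):

```python
from tortoise import fields
from tortoise.models import Model


class UniqueName(Model):  # assumed shape of the project's test model
    name = fields.CharField(max_length=20, unique=True)


async def demo() -> None:
    await UniqueName.bulk_create([UniqueName(name="n1"), UniqueName(name="n2")])
    # Before the fix this second call raised IntegrityError; with it, the
    # generated INSERT ends with ON CONFLICT DO NOTHING and is a no-op.
    await UniqueName.bulk_create(
        [UniqueName(name="n1"), UniqueName(name="n2")], ignore_conflicts=True
    )
```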
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_bulk.py::TestBulk::test_bulk_create_ignore_conflicts" ]
[ "tests/test_bulk.py::TestBulk::test_bulk_create", "tests/test_bulk.py::TestBulk::test_bulk_create_fail", "tests/test_bulk.py::TestBulk::test_bulk_create_in_transaction", "tests/test_bulk.py::TestBulk::test_bulk_create_in_transaction_fail", "tests/test_bulk.py::TestBulk::test_bulk_create_mix_specified", "tests/test_bulk.py::TestBulk::test_bulk_create_more_that_one_update_fields", "tests/test_bulk.py::TestBulk::test_bulk_create_update_fields", "tests/test_bulk.py::TestBulk::test_bulk_create_uuidpk", "tests/test_bulk.py::TestBulk::test_bulk_create_uuidpk_fail", "tests/test_bulk.py::TestBulk::test_bulk_create_uuidpk_in_transaction", "tests/test_bulk.py::TestBulk::test_bulk_create_uuidpk_in_transaction_fail", "tests/test_bulk.py::TestBulk::test_bulk_create_with_batch_size", "tests/test_bulk.py::TestBulk::test_bulk_create_with_specified" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2022-05-08T14:18:55Z"
apache-2.0
toumorokoshi__deepmerge-22
diff --git a/Makefile b/Makefile index 9a611ee..75ba49c 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ build: .venv/deps # only works with python 3+ lint: .venv/deps - .venv/bin/python -m pip install black==21.12b0 + .venv/bin/python -m pip install black==22.3.0 .venv/bin/python -m black --check . test: .venv/deps diff --git a/deepmerge/extended_set.py b/deepmerge/extended_set.py new file mode 100644 index 0000000..1d51b43 --- /dev/null +++ b/deepmerge/extended_set.py @@ -0,0 +1,25 @@ +class ExtendedSet(set): + """ + ExtendedSet is an extension of set, which allows for usage + of types that are typically not allowed in a set + (e.g. unhashable). + + The following types that cannot be used in a set are supported: + + - unhashable types + """ + + def __init__(self, elements): + self._values_by_hash = {self._hash(e): e for e in elements} + + def _insert(self, element): + self._values_by_hash[self._hash(element)] = element + + def _hash(self, element): + if getattr(element, "__hash__") is not None: + return hash(element) + else: + return hash(str(element)) + + def __contains__(self, obj): + return self._hash(obj) in self._values_by_hash diff --git a/deepmerge/strategy/list.py b/deepmerge/strategy/list.py index ca42828..2e42519 100644 --- a/deepmerge/strategy/list.py +++ b/deepmerge/strategy/list.py @@ -1,4 +1,5 @@ from .core import StrategyList +from ..extended_set import ExtendedSet class ListStrategies(StrategyList): @@ -26,5 +27,5 @@ class ListStrategies(StrategyList): @staticmethod def strategy_append_unique(config, path, base, nxt): """append items without duplicates in nxt to base.""" - base_as_set = set(base) + base_as_set = ExtendedSet(base) return base + [n for n in nxt if n not in base_as_set] diff --git a/docs/conf.py b/docs/conf.py index ee1edbc..df0dc4d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -52,18 +52,18 @@ source_suffix = ".rst" master_doc = "index" # General information about the project. -project = u"deepmerge" -copyright = u"2016, Yusuke Tsutsumi" -author = u"Yusuke Tsutsumi" +project = "deepmerge" +copyright = "2016, Yusuke Tsutsumi" +author = "Yusuke Tsutsumi" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = u"0.1" +version = "0.1" # The full version, including alpha/beta/rc tags. -release = u"0.1" +release = "0.1" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -271,8 +271,8 @@ latex_documents = [ ( master_doc, "deepmerge.tex", - u"deepmerge Documentation", - u"Yusuke Tsutsumi", + "deepmerge Documentation", + "Yusuke Tsutsumi", "manual", ), ] @@ -308,7 +308,7 @@ latex_documents = [ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "deepmerge", u"deepmerge Documentation", [author], 1)] +man_pages = [(master_doc, "deepmerge", "deepmerge Documentation", [author], 1)] # If true, show URL addresses after external links. 
# @@ -324,7 +324,7 @@ texinfo_documents = [ ( master_doc, "deepmerge", - u"deepmerge Documentation", + "deepmerge Documentation", author, "deepmerge", "One line description of project.", diff --git a/docs/guide.rst b/docs/guide.rst index 39f414a..a13bb01 100644 --- a/docs/guide.rst +++ b/docs/guide.rst @@ -10,7 +10,7 @@ it's recommended to choose your own strategies, deepmerge does provided some preconfigured mergers for a common situations: * deepmerge.always_merger: always try to merge. in the case of mismatches, the value from the second object overrides the first o ne. -* deepmerge.merge_or_raise: try to merge, raise an exception if an unmergable situation is encountered. +* deepmerge.merge_or_raise: try to merge, raise an exception if an unmergable situation is encountered. * deepmerge.conservative_merger: similar to always_merger, but in the case of a conflict, use the existing value. Once a merger is constructed, it then has a merge() method that can be called: @@ -33,7 +33,6 @@ Once a merger is constructed, it then has a merge() method that can be called: Merges are Destructive ====================== - You may have noticed from the example, but merging is a destructive behavior: it will modify the first argument passed in (the base) as part of the merge. This is intentional, as an implicit copy would result in a significant performance slowdown for deep data structures. If you need to keep the original objects unmodified, you can use the deepcopy method: @@ -96,3 +95,13 @@ Example: If a strategy fails, an exception should not be raised. This is to ensure it can be chained with other strategies, or the fall-back. +Uniqueness of elements when merging +=================================== + +Some strategies require determining the uniqueness +of the elements. Since deepmerge primarily deals with nested +types, this includes structures that are not hashable such as +dictionaries. + +In those cases, built-in deepmerge strategies will call repr() +on the object and hash that value instead. \ No newline at end of file
toumorokoshi/deepmerge
4ac5ff666d06cb072ff200ff4255d86d950b71a4
diff --git a/deepmerge/tests/strategy/test_list.py b/deepmerge/tests/strategy/test_list.py index 39215a9..7eb2d3b 100644 --- a/deepmerge/tests/strategy/test_list.py +++ b/deepmerge/tests/strategy/test_list.py @@ -19,3 +19,15 @@ def test_strategy_append_unique(custom_merger): expected = [1, 3, 2, 5, 4] actual = custom_merger.merge(base, nxt) assert actual == expected + + +def test_strategy_append_unique_nested_dict(custom_merger): + """append_unique should work even with unhashable objects + Like dicts. + """ + base = [{"bar": ["bob"]}] + nxt = [{"bar": ["baz"]}] + + result = custom_merger.merge(base, nxt) + + assert result == [{"bar": ["bob"]}, {"bar": ["baz"]}]
list merge strategy append_unique does not work for lists of dicts Hi developers, especially @morph027, I get an error when trying to apply the list merge strategy `append_unique` to lists of dictionaries. I am using deepmerge 1.0.1 and python 3.7.7. When I run the following code ```python from deepmerge import Merger my_merger = Merger( # pass in a list of tuple, with the # strategies you are looking to apply # to each type. [ (list, ["append_unique"]), (dict, ["merge"]), (set, ["union"]) ], # next, choose the fallback strategies, # applied to all other types: ["override"], # finally, choose the strategies in # the case where the types conflict: ["override"] ) base = {"foo": ["bar"]} next = {"foo": ["bar","baz"]} result = my_merger.merge(base, next) assert result == {'foo': ['bar', 'baz']} base = {"foo": [{"bar": ["bob"]}]} next = {"foo": [{"bar": ["baz"]}]} result = my_merger.merge(base, next) assert result == {'foo': [{'bar': ["bob"]}, {"bar": ["baz"]}]} ``` I get the following exception ```bash python3 test_merge.py Traceback (most recent call last): File "test_merge.py", line 29, in <module> result = my_merger.merge(base, next) File "/home/horst/venv/lib64/python3.7/site-packages/deepmerge/merger.py", line 33, in merge return self.value_strategy([], base, nxt) File "/home/horst/venv/lib64/python3.7/site-packages/deepmerge/merger.py", line 43, in value_strategy return strategy(self, path, base, nxt) File "/home/horst/venv/lib64/python3.7/site-packages/deepmerge/strategy/core.py", line 35, in __call__ ret_val = s(*args, **kwargs) File "/home/horst/venv/lib64/python3.7/site-packages/deepmerge/strategy/dict.py", line 23, in strategy_merge base[k] = config.value_strategy(path + [k], base[k], v) File "/home/horst/venv/lib64/python3.7/site-packages/deepmerge/merger.py", line 43, in value_strategy return strategy(self, path, base, nxt) File "/home/horst/venv/lib64/python3.7/site-packages/deepmerge/strategy/core.py", line 35, in __call__ ret_val = s(*args, **kwargs) File "/home/horst/venv/lib64/python3.7/site-packages/deepmerge/strategy/list.py", line 29, in strategy_append_unique base_as_set = set(base) TypeError: unhashable type: 'dict' ``` Best, Oliver
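A self-contained sketch of the approach the fix takes (`_hash` is an illustrative stand-in for the patch's `ExtendedSet`): unhashable elements such as dicts fall back to hashing their string form for the uniqueness check.

```python
# Fall back to hashing str(element) when the element itself is unhashable.
def _hash(element):
    if element.__hash__ is not None:
        return hash(element)
    return hash(str(element))


base = [{"bar": ["bob"]}]
nxt = [{"bar": ["baz"]}, {"bar": ["bob"]}]
seen = {_hash(e) for e in base}
merged = base + [n for n in nxt if _hash(n) not in seen]
assert merged == [{"bar": ["bob"]}, {"bar": ["baz"]}]  # duplicate dropped
```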
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "deepmerge/tests/strategy/test_list.py::test_strategy_append_unique_nested_dict" ]
[ "deepmerge/tests/strategy/test_list.py::test_strategy_append_unique" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2022-10-21T06:00:20Z"
mit
tox-dev__tox-1614
diff --git a/docs/changelog/1575.feature.rst b/docs/changelog/1575.feature.rst new file mode 100644 index 00000000..7a36ab20 --- /dev/null +++ b/docs/changelog/1575.feature.rst @@ -0,0 +1,3 @@ +Implement support for building projects +having :pep:`517#in-tree-build-backends` ``backend-path`` setting - +by :user:`webknjaz` diff --git a/src/tox/helper/build_isolated.py b/src/tox/helper/build_isolated.py index 55ea41b8..3d897097 100644 --- a/src/tox/helper/build_isolated.py +++ b/src/tox/helper/build_isolated.py @@ -1,10 +1,40 @@ +"""PEP 517 build backend invocation script. + +It accepts externally parsed build configuration from `[build-system]` +in `pyproject.toml` and invokes an API endpoint for building an sdist +tarball. +""" + +import os import sys + +def _ensure_module_in_paths(module, paths): + """Verify that the imported backend belongs in-tree.""" + if not paths: + return + + module_path = os.path.normcase(os.path.abspath(module.__file__)) + normalized_paths = (os.path.normcase(os.path.abspath(path)) for path in paths) + + if any(os.path.commonprefix((module_path, path)) == path for path in normalized_paths): + return + + raise SystemExit( + "build-backend ({!r}) must exist in one of the paths " + "specified by backend-path ({!r})".format(module, paths), + ) + + dist_folder = sys.argv[1] backend_spec = sys.argv[2] backend_obj = sys.argv[3] if len(sys.argv) >= 4 else None +backend_paths = sys.argv[4].split(os.path.pathsep) if sys.argv[4] else [] + +sys.path[:0] = backend_paths backend = __import__(backend_spec, fromlist=["_trash"]) +_ensure_module_in_paths(backend, backend_paths) if backend_obj: backend = getattr(backend, backend_obj) diff --git a/src/tox/package/builder/isolated.py b/src/tox/package/builder/isolated.py index c02aa109..38ebe8ea 100644 --- a/src/tox/package/builder/isolated.py +++ b/src/tox/package/builder/isolated.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import json +import os from collections import namedtuple import six @@ -11,7 +12,9 @@ from tox import reporter from tox.config import DepConfig, get_py_project_toml from tox.constants import BUILD_ISOLATED, BUILD_REQUIRE_SCRIPT -BuildInfo = namedtuple("BuildInfo", ["requires", "backend_module", "backend_object"]) +BuildInfo = namedtuple( + "BuildInfo", ["requires", "backend_module", "backend_object", "backend_paths"], +) def build(config, session): @@ -84,7 +87,12 @@ def get_build_info(folder): module = args[0] obj = args[1] if len(args) > 1 else "" - return BuildInfo(requires, module, obj) + backend_paths = build_system.get("backend-path", []) + if not isinstance(backend_paths, list): + abort("backend-path key at build-system section must be a list, if specified") + backend_paths = [folder.join(p) for p in backend_paths] + + return BuildInfo(requires, module, obj, backend_paths) def perform_isolated_build(build_info, package_venv, dist_dir, setup_dir): @@ -103,6 +111,7 @@ def perform_isolated_build(build_info, package_venv, dist_dir, setup_dir): str(dist_dir), build_info.backend_module, build_info.backend_object, + os.path.pathsep.join(str(p) for p in build_info.backend_paths), ], returnout=True, action=action,
tox-dev/tox
268ca020ef8bf1600123add72eb4984cba970a4d
diff --git a/tests/unit/package/builder/test_package_builder_isolated.py b/tests/unit/package/builder/test_package_builder_isolated.py index d43ed5f1..63e0685a 100644 --- a/tests/unit/package/builder/test_package_builder_isolated.py +++ b/tests/unit/package/builder/test_package_builder_isolated.py @@ -138,3 +138,18 @@ def test_package_isolated_toml_bad_backend(initproj): build-backend = [] """, ) + + +def test_package_isolated_toml_bad_backend_path(initproj): + """Verify that a non-list 'backend-path' is forbidden.""" + toml_file_check( + initproj, + 6, + "backend-path key at build-system section must be a list, if specified", + """ + [build-system] + requires = [] + build-backend = 'setuptools.build_meta' + backend-path = 42 + """, + )
Support PEP517 in-tree build backend I'm getting ```python-traceback ERROR: invocation failed (exit code 1), logfile: ~/src/github/ansible/pylibssh/.tox/.package/log/.package-2.log =================================== log start ==================================== Traceback (most recent call last): File "~/.pyenv/versions/3.7.1/lib/python3.7/site-packages/tox/helper/build_requires.py", line 7, in <module> backend = __import__(backend_spec, fromlist=[None]) ModuleNotFoundError: No module named 'pep517_backend' ``` because of ```console $ cat pyproject.toml [build-system] ... backend-path = ["bin"] # requires 'Pip>=20' or 'pep517>=0.6.0' build-backend = "pep517_backend" ``` and ```console $ ls -l bin/pep517_backend.py -rw-r--r-- 1 me me 6.2K May 5 00:43 bin/pep517_backend.py ``` I think this should be considered a bug. pip supports this starting with v20 and pep517 supports it from v0.7 or so.
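For context, an in-tree backend can be as small as a module that re-exports setuptools' PEP 517 hooks -- the integration test added with this fix uses exactly that trick (the real `bin/pep517_backend.py` from the report is more elaborate):

```python
# bin/pep517_backend.py -- minimal in-tree backend sketch: delegate every
# PEP 517 hook (build_sdist, build_wheel, ...) to setuptools.
from setuptools.build_meta import *  # noqa: F401,F403
```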
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_bad_backend_path" ]
[ "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_no_backend", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_bad_requires", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_bad_backend", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_no_requires", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_no_build_system" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-07-13T22:47:10Z"
mit
tox-dev__tox-1655
diff --git a/CONTRIBUTORS b/CONTRIBUTORS index 8eac46c9..1c57f659 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -78,6 +78,7 @@ Morgan Fainberg Naveen S R Nick Douma Nick Prendergast +Nicolas Vivet Oliver Bestwalter Pablo Galindo Paul Moore diff --git a/docs/changelog/1654.bugfix.rst b/docs/changelog/1654.bugfix.rst new file mode 100644 index 00000000..0d20d48a --- /dev/null +++ b/docs/changelog/1654.bugfix.rst @@ -0,0 +1,1 @@ +Support for PEP517 in-tree build backend-path key in ``get-build-requires``. - by :user:`nizox` diff --git a/src/tox/helper/build_requires.py b/src/tox/helper/build_requires.py index cd074f97..aafb258c 100644 --- a/src/tox/helper/build_requires.py +++ b/src/tox/helper/build_requires.py @@ -1,8 +1,12 @@ import json +import os import sys backend_spec = sys.argv[1] backend_obj = sys.argv[2] if len(sys.argv) >= 3 else None +backend_paths = sys.argv[3].split(os.path.pathsep) if len(sys.argv) >= 4 else [] + +sys.path[:0] = backend_paths backend = __import__(backend_spec, fromlist=["_trash"]) if backend_obj: diff --git a/src/tox/package/builder/isolated.py b/src/tox/package/builder/isolated.py index 38ebe8ea..998ce25f 100644 --- a/src/tox/package/builder/isolated.py +++ b/src/tox/package/builder/isolated.py @@ -92,6 +92,15 @@ def get_build_info(folder): abort("backend-path key at build-system section must be a list, if specified") backend_paths = [folder.join(p) for p in backend_paths] + normalized_folder = os.path.normcase(str(folder.realpath())) + normalized_paths = (os.path.normcase(str(path.realpath())) for path in backend_paths) + + if not all( + os.path.commonprefix((normalized_folder, path)) == normalized_folder + for path in normalized_paths + ): + abort("backend-path must exist in the project root") + return BuildInfo(requires, module, obj, backend_paths) @@ -129,6 +138,7 @@ def get_build_requires(build_info, package_venv, setup_dir): BUILD_REQUIRE_SCRIPT, build_info.backend_module, build_info.backend_object, + os.path.pathsep.join(str(p) for p in build_info.backend_paths), ], returnout=True, action=action,
tox-dev/tox
23dd96f5a6891fb13e298ea941bd457931421ffd
diff --git a/tests/unit/package/builder/test_package_builder_isolated.py b/tests/unit/package/builder/test_package_builder_isolated.py index 63e0685a..458e43bb 100644 --- a/tests/unit/package/builder/test_package_builder_isolated.py +++ b/tests/unit/package/builder/test_package_builder_isolated.py @@ -153,3 +153,43 @@ def test_package_isolated_toml_bad_backend_path(initproj): backend-path = 42 """, ) + + +def test_package_isolated_toml_backend_path_outside_root(initproj): + """Verify that a 'backend-path' outside the project root is forbidden.""" + toml_file_check( + initproj, + 6, + "backend-path must exist in the project root", + """ + [build-system] + requires = [] + build-backend = 'setuptools.build_meta' + backend-path = ['..'] + """, + ) + + +def test_verbose_isolated_build_in_tree(initproj, mock_venv, cmd): + initproj( + "example123-0.5", + filedefs={ + "tox.ini": """ + [tox] + isolated_build = true + """, + "build.py": """ + from setuptools.build_meta import * + """, + "pyproject.toml": """ + [build-system] + requires = ["setuptools >= 35.0.2"] + build-backend = 'build' + backend-path = ['.'] + """, + }, + ) + result = cmd("--sdistonly", "-v", "-v", "-v", "-e", "py") + assert "running sdist" in result.out, result.out + assert "running egg_info" in result.out, result.out + assert "Writing example123-0.5{}setup.cfg".format(os.sep) in result.out, result.out
PEP517 in-tree build backend-path support in get-build-requires Using tox 3.19, in-tree build with `backend-path` fails with: ``` .package start: get-build-requires /Users/nicolas/Workspaces/proj/.tox/.package setting PATH=/Users/nicolas/Workspaces/proj/.tox/.package/bin:/Users/nicolas/.virtualenvs/proj-python3.8/bin:/opt/local/bin:/opt/local/sbin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin [15159] /Users/nicolas/Workspaces/proj$ /Users/nicolas/Workspaces/proj/.tox/.package/bin/python /Users/nicolas/.virtualenvs/proj-python3.8/lib/python3.8/site-packages/tox/helper/build_requires.py build_package '' >.tox/.package/log/.package-0.log ERROR: invocation failed (exit code 1), logfile: /Users/nicolas/Workspaces/proj/.tox/.package/log/.package-0.log =================================================================================== log start ==================================================================================== Traceback (most recent call last): File "/Users/nicolas/.virtualenvs/proj-python3.8/lib/python3.8/site-packages/tox/helper/build_requires.py", line 7, in <module> backend = __import__(backend_spec, fromlist=["_trash"]) ModuleNotFoundError: No module named 'build_package' ``` `pyproject.toml` content is: ``` [build-system] requires = [ "setuptools >= 40.9.0", "wheel", ] build-backend = "build_package" backend-path = ["scripts"] ``` A quick patch of `tox/helper/build_requires.py` to use the backend paths similarly to `tox/helper/build_isolated.py` fixes the issue. I can do a PR but the patch duplicates the `_ensure_module_in_paths` function.
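The core of that quick patch, mirroring what `build_isolated.py` already did: put the parsed `backend-path` entries at the front of `sys.path` before importing the backend module (the `"scripts"` entry is the value from the pyproject.toml above):

```python
import sys

backend_paths = ["scripts"]  # parsed from the backend-path key in pyproject.toml
sys.path[:0] = backend_paths
# __import__("build_package", fromlist=["_trash"]) can now find
# scripts/build_package.py instead of raising ModuleNotFoundError.
```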
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_backend_path_outside_root" ]
[ "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_bad_backend", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_no_backend", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_no_build_system", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_bad_requires", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_bad_backend_path", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_no_requires" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-08-19T12:45:02Z"
mit
tox-dev__tox-1860
diff --git a/docs/changelog/1772.bugfix.rst b/docs/changelog/1772.bugfix.rst new file mode 100644 index 00000000..c96ba979 --- /dev/null +++ b/docs/changelog/1772.bugfix.rst @@ -0,0 +1,2 @@ +Fix a killed tox (via SIGTERM) leaving the commands subprocesses running +by handling it as if it were a KeyboardInterrupt - by :user:`dajose` diff --git a/docs/config.rst b/docs/config.rst index f3bb1b12..01bc9b7d 100644 --- a/docs/config.rst +++ b/docs/config.rst @@ -607,9 +607,9 @@ Complete list of settings that you can put into ``testenv*`` sections: .. versionadded:: 3.15.2 - When an interrupt is sent via Ctrl+C, the SIGINT is sent to all foreground - processes. The :conf:``suicide_timeout`` gives the running process time to - cleanup and exit before receiving (in some cases, a duplicate) SIGINT from + When an interrupt is sent via Ctrl+C or the tox process is killed with a SIGTERM, + a SIGINT is sent to all foreground processes. The :conf:``suicide_timeout`` gives + the running process time to cleanup and exit before receiving (in some cases, a duplicate) SIGINT from tox. .. conf:: interrupt_timeout ^ float ^ 0.3 diff --git a/src/tox/action.py b/src/tox/action.py index b5381b83..e7f9b77b 100644 --- a/src/tox/action.py +++ b/src/tox/action.py @@ -49,6 +49,10 @@ class Action(object): self.suicide_timeout = suicide_timeout self.interrupt_timeout = interrupt_timeout self.terminate_timeout = terminate_timeout + if is_main_thread(): + # python allows only main thread to install signal handlers + # see https://docs.python.org/3/library/signal.html#signals-and-threads + self._install_sigterm_handler() def __enter__(self): msg = "{} {}".format(self.msg, " ".join(map(str, self.args))) @@ -278,3 +282,12 @@ class Action(object): new_args.append(str(arg)) return new_args + + def _install_sigterm_handler(self): + """Handle sigterm as if it were a keyboardinterrupt""" + + def sigterm_handler(signum, frame): + reporter.error("Got SIGTERM, handling it as a KeyboardInterrupt") + raise KeyboardInterrupt() + + signal.signal(signal.SIGTERM, sigterm_handler)
tox-dev/tox
3d80588df8ba2e3a382b4345d2bf6cea44d3f901
diff --git a/tests/integration/test_provision_int.py b/tests/integration/test_provision_int.py index 0ae411b8..05fb1a66 100644 --- a/tests/integration/test_provision_int.py +++ b/tests/integration/test_provision_int.py @@ -73,7 +73,8 @@ def test_provision_from_pyvenv(initproj, cmd, monkeypatch): "sys.platform == 'win32'", reason="triggering SIGINT reliably on Windows is hard", ) -def test_provision_interrupt_child(initproj, monkeypatch, capfd): [email protected]("signal_type", [signal.SIGINT, signal.SIGTERM]) +def test_provision_interrupt_child(initproj, monkeypatch, capfd, signal_type): monkeypatch.delenv(str("PYTHONPATH"), raising=False) monkeypatch.setenv(str("TOX_REPORTER_TIMESTAMP"), str("1")) initproj( @@ -123,7 +124,7 @@ def test_provision_interrupt_child(initproj, monkeypatch, capfd): # 1 process for the host tox, 1 for the provisioned assert len(all_process) >= 2, all_process - process.send_signal(signal.CTRL_C_EVENT if sys.platform == "win32" else signal.SIGINT) + process.send_signal(signal.CTRL_C_EVENT if sys.platform == "win32" else signal_type) process.communicate() out, err = capfd.readouterr() assert ".tox KeyboardInterrupt: from" in out, out
kill tox process does not kill running commands Tox does not seem to forward the SIGTERM signal to the subprocess it is driving. This can be reproduced with the following configuration by killing the tox process. `tox.ini` ```ini [tox] skipsdist = True [testenv:papa] commands = {envpython} {toxinidir}/papa.py ``` `papa.py` ```python from time import sleep while True: sleep(1) print('papa') ``` From one terminal, run ``` tox -e papa ``` and from another, find the process and kill it, for example: ``` diazdavi@darker:/tmp$ ps -aux | grep tox diazdavi 779128 6.3 0.1 114196 22588 pts/0 S+ 17:00 0:00 /usr/bin/python3 /usr/local/bin/tox -e papa diazdavi 779140 0.5 0.0 26912 9500 pts/0 S+ 17:00 0:00 /tmp/.tox/papa/bin/python papa.py diazdavi 779178 0.0 0.0 17664 2700 pts/2 S+ 17:00 0:00 grep --color=auto tox diazdavi@darker:/tmp$ kill 779128 ``` The first terminal will keep showing the prints :(
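The essence of the eventual fix: translate SIGTERM into a KeyboardInterrupt so the existing Ctrl+C teardown path (SIGINT to the children, then terminate) runs.

```python
import signal


def sigterm_handler(signum, frame):
    raise KeyboardInterrupt()


# Python only allows installing signal handlers from the main thread.
signal.signal(signal.SIGTERM, sigterm_handler)
```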
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/integration/test_provision_int.py::test_provision_interrupt_child[Signals.SIGTERM]" ]
[ "tests/integration/test_provision_int.py::test_provision_interrupt_child[Signals.SIGINT]" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2021-01-19T23:34:09Z"
mit
tox-dev__tox-1869
diff --git a/docs/changelog/1868.feature.rst b/docs/changelog/1868.feature.rst new file mode 100644 index 00000000..8511d2f4 --- /dev/null +++ b/docs/changelog/1868.feature.rst @@ -0,0 +1,2 @@ +Not all package dependencies are installed when different tox environments in the same run use different set of +extras - by :user:`gaborbernat`. diff --git a/src/tox/tox_env/python/api.py b/src/tox/tox_env/python/api.py index 015ff16b..e5402a0f 100644 --- a/src/tox/tox_env/python/api.py +++ b/src/tox/tox_env/python/api.py @@ -222,8 +222,7 @@ class Python(ToxEnv, ABC): # bail out and force recreate logging.warning(f"recreate env because dependencies removed: {', '.join(str(i) for i in missing)}") raise Recreate - new_deps_str = set(conf_deps) - set(old) - new_deps = [PythonDep(Requirement(i)) for i in new_deps_str] + new_deps = [PythonDep(Requirement(i)) for i in conf_deps if i not in old] self.install_python_packages(packages=new_deps, of_type=of_type) return False diff --git a/src/tox/tox_env/python/virtual_env/package/api.py b/src/tox/tox_env/python/virtual_env/package/api.py index b3a0a11b..738af1e1 100644 --- a/src/tox/tox_env/python/virtual_env/package/api.py +++ b/src/tox/tox_env/python/virtual_env/package/api.py @@ -1,6 +1,7 @@ import os import sys from contextlib import contextmanager +from copy import deepcopy from enum import Enum from pathlib import Path from threading import RLock @@ -95,6 +96,7 @@ class Pep517VirtualEnvPackage(VirtualEnv, PythonPackage, Frontend): self._build_wheel_cache: Optional[WheelResult] = None self._backend_executor: Optional[LocalSubProcessPep517Executor] = None self._package_dependencies: Optional[List[Requirement]] = None + self._package_dev_dependencies: Optional[List[Requirement]] = None self._lock = RLock() # can build only one package at a time self._package: Dict[Tuple[PackageType, str], Any] = {} self._run_env_to_wheel_builder_env: Dict[str, PackageToxEnv] = {} @@ -203,27 +205,28 @@ class Pep517VirtualEnvPackage(VirtualEnv, PythonPackage, Frontend): def get_package_dependencies(self, for_env: EnvConfigSet) -> List[Requirement]: env_name = for_env.name - extras: Set[str] = for_env["extras"] with self._lock: if self._package_dependencies is None: # pragma: no branch self._ensure_meta_present() - dependencies: List[Requirement] = [] - of_type, _ = self._run_env_to_info[env_name] - if of_type == PackageType.dev: - dependencies.extend(self.requires()) - dependencies.extend(self.get_requires_for_build_sdist().requires) - dependencies.extend(self.discover_package_dependencies(self._distribution_meta, extras)) - self._package_dependencies = dependencies - return self._package_dependencies + requires: List[str] = cast(PathDistribution, self._distribution_meta).requires or [] + self._package_dependencies = [Requirement(i) for i in requires] + of_type, _ = self._run_env_to_info[env_name] + if of_type == PackageType.dev and self._package_dev_dependencies is None: + self._package_dev_dependencies = [*self.requires(), *self.get_requires_for_build_sdist().requires] + if of_type == PackageType.dev: + result: List[Requirement] = cast(List[Requirement], self._package_dev_dependencies).copy() + else: + result = [] + extras: Set[str] = for_env["extras"] + result.extend(self.dependencies_with_extras(self._package_dependencies, extras)) + return result @staticmethod - def discover_package_dependencies(meta: PathDistribution, extras: Set[str]) -> List[Requirement]: + def dependencies_with_extras(deps: List[Requirement], extras: Set[str]) -> List[Requirement]: result: 
List[Requirement] = [] - requires = meta.requires or [] - for req_str in requires: - req = Requirement(req_str) + for req in deps: + req = deepcopy(req) markers: List[Union[str, Tuple[Variable, Variable, Variable]]] = getattr(req.marker, "_markers", []) or [] - # find the extra marker (if has) _at: Optional[int] = None extra: Optional[str] = None @@ -241,7 +244,6 @@ class Pep517VirtualEnvPackage(VirtualEnv, PythonPackage, Frontend): if len(markers) == 0: req.marker = None break - # continue only if this extra should be included if not (extra is None or extra in extras): continue result.append(req)
tox-dev/tox
f17774fdd9fceea40564110bdeac7ce4b30ed677
diff --git a/tests/tox_env/python/virtual_env/test_package.py b/tests/tox_env/python/virtual_env/test_package.py index 9ba1f14e..d3ad4328 100644 --- a/tests/tox_env/python/virtual_env/test_package.py +++ b/tests/tox_env/python/virtual_env/test_package.py @@ -1,5 +1,6 @@ import sys from itertools import zip_longest +from pathlib import Path from textwrap import dedent import pytest @@ -37,7 +38,7 @@ def test_tox_ini_package_type_invalid(tox_project: ToxProjectCreator) -> None: @pytest.fixture(scope="session") -def pkg_with_extras(tmp_path_factory: TempPathFactory) -> PathDistribution: # type: ignore[no-any-unimported] +def pkg_with_extras_project(tmp_path_factory: TempPathFactory) -> Path: py_ver = ".".join(str(i) for i in sys.version_info[0:2]) setup_cfg = f""" [metadata] @@ -64,14 +65,19 @@ def pkg_with_extras(tmp_path_factory: TempPathFactory) -> PathDistribution: # t (tmp_path / "setup.py").write_text("from setuptools import setup; setup()") toml = '[build-system]\nrequires=["setuptools", "wheel"]\nbuild-backend = "setuptools.build_meta"' (tmp_path / "pyproject.toml").write_text(toml) - frontend = SubprocessFrontend(*SubprocessFrontend.create_args_from_folder(tmp_path)[:-1]) - meta = tmp_path / "meta" + return tmp_path + + [email protected](scope="session") +def pkg_with_extras(pkg_with_extras_project: Path) -> PathDistribution: # type: ignore[no-any-unimported] + frontend = SubprocessFrontend(*SubprocessFrontend.create_args_from_folder(pkg_with_extras_project)[:-1]) + meta = pkg_with_extras_project / "meta" result = frontend.prepare_metadata_for_build_wheel(meta) return Distribution.at(result.metadata) def test_load_dependency_no_extra(pkg_with_extras: PathDistribution) -> None: # type: ignore[no-any-unimported] - result = Pep517VirtualEnvPackage.discover_package_dependencies(pkg_with_extras, set()) + result = Pep517VirtualEnvPackage.dependencies_with_extras([Requirement(i) for i in pkg_with_extras.requires], set()) for left, right in zip_longest(result, (Requirement("appdirs>=1.4.3"), Requirement("colorama>=0.4.3"))): assert isinstance(right, Requirement) assert str(left) == str(right) @@ -79,7 +85,9 @@ def test_load_dependency_no_extra(pkg_with_extras: PathDistribution) -> None: # def test_load_dependency_many_extra(pkg_with_extras: PathDistribution) -> None: # type: ignore[no-any-unimported] py_ver = ".".join(str(i) for i in sys.version_info[0:2]) - result = Pep517VirtualEnvPackage.discover_package_dependencies(pkg_with_extras, {"docs", "testing"}) + result = Pep517VirtualEnvPackage.dependencies_with_extras( + [Requirement(i) for i in pkg_with_extras.requires], {"docs", "testing"} + ) exp = [ Requirement("appdirs>=1.4.3"), Requirement("colorama>=0.4.3"), @@ -91,3 +99,19 @@ def test_load_dependency_many_extra(pkg_with_extras: PathDistribution) -> None: for left, right in zip_longest(result, exp): assert isinstance(right, Requirement) assert str(left) == str(right) + + +def test_get_package_deps_different_extras(pkg_with_extras_project: Path, tox_project: ToxProjectCreator) -> None: + proj = tox_project({"tox.ini": "[testenv:a]\npackage=dev\nextras=docs\n[testenv:b]\npackage=sdist\nextras=format"}) + execute_calls = proj.patch_execute(lambda r: 0 if "install" in r.run_id else None) + result = proj.run("r", "--root", str(pkg_with_extras_project), "-e", "a,b") + result.assert_success() + installs = { + i[0][0].conf.name: i[0][3].cmd[5:] + for i in execute_calls.call_args_list + if i[0][3].run_id.startswith("install_package_deps") + } + assert installs == { + "a": ["setuptools", "wheel", 
"appdirs>=1.4.3", "colorama>=0.4.3", "sphinx>=3", "sphinx-rtd-theme<1,>=0.4.3"], + "b": ["appdirs>=1.4.3", "colorama>=0.4.3", "black>=3", "flake8"], + }
tox4: not all extras are evaluated

I have a tox.ini with a `testenv` including `extras = test` and a `testenv:coverage` including `extras = test, coverage`. The coverage env fails with `pytest-cov` missing - only in `tox4`.

# reproduce
```
git clone [email protected]:morepath/more.cerberus.git
cd more.cerberus
tox4 -r
```

This only happens when running all envs - the error does not show when running `coverage` in isolation. Tested with both the current tox pre-release and tox head.

```
coverage: remove tox env folder /home/jugmac00/Projects/more.cerberus/.tox/4/coverage
coverage: install_package_deps> .tox/4/coverage/bin/python -I -m pip install 'setuptools>=40.8.0' wheel 'cerberus<2.0.0,>=1.3.2' 'morepath>=0.19' 'pytest>=2.9.1' pytest-remove-stale-bytecode webtest
coverage: install_package> .tox/4/coverage/bin/python -I -m pip install --no-deps --force-reinstall --no-build-isolation -e /home/jugmac00/Projects/more.cerberus
coverage: commands[0]> pytest --cov --cov-fail-under=100
ERROR: usage: pytest [options] [file_or_dir] [file_or_dir] [...]
pytest: error: unrecognized arguments: --cov --cov-fail-under=100
  inifile: /home/jugmac00/Projects/more.cerberus/setup.cfg
  rootdir: /home/jugmac00/Projects/more.cerberus
coverage: exit 4 (0.20 seconds) /home/jugmac00/Projects/more.cerberus> pytest --cov --cov-fail-under=100
.pkg: _exit> python /home/jugmac00/opt/tox4/lib/python3.8/site-packages/tox/util/pep517/backend.py True setuptools.build_meta __legacy__
coverage: FAIL ✖ in 4.42 seconds
```
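The failure pattern (the `coverage` env missing an extra that only a later env requested) points at package dependencies being resolved once and reused across envs; the patch above instead filters the raw requirement list per env. Below is a minimal sketch of such marker-based filtering, assuming `packaging` is installed; `deps_with_extras` is an illustrative stand-in for the `dependencies_with_extras` helper the patch introduces, not tox's actual API.

```python
# Illustrative sketch (not tox's real code): filter a requirement list by the
# extras a single env requested, instead of reusing the first env's view.
from packaging.requirements import Requirement


def deps_with_extras(requires, extras):
    result = []
    for spec in requires:
        req = Requirement(spec)
        if req.marker is None:  # unconditional dependency: always kept
            result.append(req)
        elif any(req.marker.evaluate({"extra": extra}) for extra in extras):
            req.marker = None  # the extra matched; install unconditionally
            result.append(req)
    return result


resolved = deps_with_extras(
    ["pytest>=2.9.1", 'pytest-cov; extra == "coverage"'], {"coverage"}
)
print([str(req) for req in resolved])  # ['pytest>=2.9.1', 'pytest-cov']
```

With per-env filtering like this, the `coverage` env keeps `pytest-cov` even when another env with fewer extras ran first.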
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/tox_env/python/virtual_env/test_package.py::test_load_dependency_many_extra", "tests/tox_env/python/virtual_env/test_package.py::test_get_package_deps_different_extras", "tests/tox_env/python/virtual_env/test_package.py::test_load_dependency_no_extra" ]
[ "tests/tox_env/python/virtual_env/test_package.py::test_tox_ini_package_type_valid[sdist]", "tests/tox_env/python/virtual_env/test_package.py::test_tox_ini_package_type_valid[dev]", "tests/tox_env/python/virtual_env/test_package.py::test_tox_ini_package_type_valid[wheel]", "tests/tox_env/python/virtual_env/test_package.py::test_tox_ini_package_type_invalid" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-01-23T16:03:22Z"
mit
tox-dev__tox-1889
diff --git a/docs/changelog/1831.bugfix.rst b/docs/changelog/1831.bugfix.rst new file mode 100644 index 00000000..f5ef2133 --- /dev/null +++ b/docs/changelog/1831.bugfix.rst @@ -0,0 +1,1 @@ +Support aliases in show config key specification (will print with the primary key) - by :user:`gaborbernat`. diff --git a/docs/changelog/1831.feature.rst b/docs/changelog/1831.feature.rst new file mode 100644 index 00000000..c8baa606 --- /dev/null +++ b/docs/changelog/1831.feature.rst @@ -0,0 +1,2 @@ +Support comments via the ``#`` character within the ini configuration (to force a literal ``#`` use ``\#``) - +by :user:`gaborbernat`. diff --git a/src/tox/config/loader/ini/__init__.py b/src/tox/config/loader/ini/__init__.py index 54eb51fa..a8867c02 100644 --- a/src/tox/config/loader/ini/__init__.py +++ b/src/tox/config/loader/ini/__init__.py @@ -1,4 +1,5 @@ import inspect +import re from concurrent.futures import Future from configparser import ConfigParser, SectionProxy from contextlib import contextmanager @@ -30,11 +31,21 @@ class IniLoader(StrConvert, Loader[str]): def load_raw(self, key: str, conf: Optional[Config], env_name: Optional[str]) -> str: value = self._section[key] - collapsed_newlines = value.replace("\\\r\n", "").replace("\\\n", "") # collapse explicit new-line escape + collapsed_newlines = value.replace("\r", "").replace("\\\n", "") # collapse explicit new-line escape + # strip comments + strip_comments = "\n".join( + no_comment + for no_comment in ( + re.sub(r"(\s)*(?<!\\)#.*", "", line) + for line in collapsed_newlines.split("\n") + if not line.startswith("#") + ) + if no_comment.strip() + ) if conf is None: # conf is None when we're loading the global tox configuration file for the CLI - factor_filtered = collapsed_newlines # we don't support factor and replace functionality there + factor_filtered = strip_comments # we don't support factor and replace functionality there else: - factor_filtered = filter_for_env(collapsed_newlines, env_name) # select matching factors + factor_filtered = filter_for_env(strip_comments, env_name) # select matching factors return factor_filtered @contextmanager diff --git a/src/tox/config/sets.py b/src/tox/config/sets.py index 6e31a664..2cd735b9 100644 --- a/src/tox/config/sets.py +++ b/src/tox/config/sets.py @@ -1,4 +1,3 @@ -from collections import OrderedDict from pathlib import Path from typing import ( TYPE_CHECKING, @@ -36,7 +35,8 @@ class ConfigSet: self._conf = conf self.loaders: List[Loader[Any]] = [] self._defined: Dict[str, ConfigDefinition[Any]] = {} - self._keys: Dict[str, None] = OrderedDict() + self._keys: Dict[str, None] = {} + self._alias: Dict[str, str] = {} def add_config( self, @@ -76,6 +76,8 @@ class ConfigSet: raise ValueError(f"config {key} already defined") else: self._keys[key] = None + for item in keys: + self._alias[item] = key for key in keys: self._defined[key] = definition return definition @@ -101,6 +103,9 @@ class ConfigSet: def __iter__(self) -> Iterator[str]: return iter(self._keys.keys()) + def __contains__(self, item: str) -> bool: + return item in self._alias + def unused(self) -> List[str]: """Return a list of keys present in the config source but not used""" found: Set[str] = set() @@ -109,6 +114,9 @@ class ConfigSet: found -= self._defined.keys() return list(sorted(found)) + def primary_key(self, key: str) -> str: + return self._alias[key] + class CoreConfigSet(ConfigSet): def __init__(self, conf: "Config", root: Path) -> None: diff --git a/src/tox/session/cmd/show_config.py b/src/tox/session/cmd/show_config.py 
index 3711866f..934b5038 100644 --- a/src/tox/session/cmd/show_config.py +++ b/src/tox/session/cmd/show_config.py @@ -99,6 +99,7 @@ def print_conf(is_colored: bool, conf: ConfigSet, keys: Iterable[str]) -> None: for key in keys if keys else conf: if key not in conf: continue + key = conf.primary_key(key) try: value = conf[key] as_str, multi_line = stringify(value)
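The alias half of this patch keeps an `_alias` table so any spelling of a key resolves to its primary name before printing. A stripped-down sketch of that lookup (hypothetical class name; the real logic lives in `ConfigSet` above):

```python
# Minimal alias-resolution sketch: every accepted spelling of a config key
# maps to the primary key, so lookups for "setenv" resolve to "set_env".
class AliasMap:
    def __init__(self):
        self._alias = {}

    def add(self, keys):
        primary = keys[0]  # the first spelling is the primary key
        for key in keys:
            self._alias[key] = primary

    def __contains__(self, key):
        return key in self._alias

    def primary_key(self, key):
        return self._alias[key]


aliases = AliasMap()
aliases.add(["set_env", "setenv"])
assert "setenv" in aliases
assert aliases.primary_key("setenv") == "set_env"
```

Per the accompanying test, `tox c -e py -k setenv` then succeeds and prints the value under the primary name, `set_env = `.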
tox-dev/tox
8cbce69895511e989ed3677c2fb953005701bcd0
diff --git a/tests/config/loader/ini/test_ini_loader.py b/tests/config/loader/ini/test_ini_loader.py index f2fb13d8..d75dfe75 100644 --- a/tests/config/loader/ini/test_ini_loader.py +++ b/tests/config/loader/ini/test_ini_loader.py @@ -38,3 +38,21 @@ def test_ini_loader_raw_strip_escaped_newline(mk_ini_conf: Callable[[str], Confi loader = IniLoader("tox", mk_ini_conf(f"[tox]{sep}a=b\\{sep} c"), []) result = loader.load(key="a", of_type=str, conf=None, env_name=None, chain=[], kwargs={}) assert result == "bc" + + [email protected]( + ["case", "result"], + [ + ("# a", ""), + ("#", ""), + ("a # w", "a"), + ("a\t# w", "a"), + ("a# w", "a"), + ("a\\# w", "a\\# w"), + ("#a\n b # w\n w", "b\nw"), + ], +) +def test_ini_loader_strip_comments(mk_ini_conf: Callable[[str], ConfigParser], case: str, result: str) -> None: + loader = IniLoader("tox", mk_ini_conf(f"[tox]\na={case}"), []) + outcome = loader.load(key="a", of_type=str, conf=None, env_name=None, chain=[], kwargs={}) + assert outcome == result diff --git a/tests/session/cmd/test_show_config.py b/tests/session/cmd/test_show_config.py index 1cd3fcda..c0232d40 100644 --- a/tests/session/cmd/test_show_config.py +++ b/tests/session/cmd/test_show_config.py @@ -141,3 +141,9 @@ def test_show_config_select_only(tox_project: ToxProjectCreator) -> None: parser.read_string(result.out) sections = set(parser.sections()) assert sections == {"testenv:.pkg", "testenv:b"} + + +def test_show_config_alias(tox_project: ToxProjectCreator) -> None: + outcome = tox_project({"tox.ini": ""}).run("c", "-e", "py", "-k", "setenv") + outcome.assert_success() + assert "set_env = " in outcome.out
pass_env does not support comments

Comments can be used to document the reason for setting a value; see the example shown by ``tox4 c -e py39-core`` in ansible-lint. Perhaps within the ini file we should always strip trailing comments from every type of input.
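The fix above strips unescaped `#` comments line by line before factor filtering. The same regex, lifted out of `IniLoader.load_raw` into a standalone helper for illustration (the helper name is mine, the regex is the patch's):

```python
import re


def strip_comments(value):
    # Drop full-line comments, then strip trailing unescaped "#..." from each
    # remaining line; "\#" survives so a literal hash can still be written.
    lines = (
        re.sub(r"(\s)*(?<!\\)#.*", "", line)
        for line in value.split("\n")
        if not line.startswith("#")
    )
    return "\n".join(line for line in lines if line.strip())


assert strip_comments("a # why we set it") == "a"
assert strip_comments("a\\# not a comment") == "a\\# not a comment"
assert strip_comments("#a\n b # w\n w") == " b\n w"
```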
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/config/loader/ini/test_ini_loader.py::test_ini_loader_strip_comments[#-]", "tests/config/loader/ini/test_ini_loader.py::test_ini_loader_strip_comments[a", "tests/config/loader/ini/test_ini_loader.py::test_ini_loader_strip_comments[a\\t#", "tests/config/loader/ini/test_ini_loader.py::test_ini_loader_strip_comments[#", "tests/config/loader/ini/test_ini_loader.py::test_ini_loader_strip_comments[#a\\n", "tests/config/loader/ini/test_ini_loader.py::test_ini_loader_strip_comments[a#", "tests/session/cmd/test_show_config.py::test_show_config_alias" ]
[ "tests/config/loader/ini/test_ini_loader.py::test_ini_loader_raw_strip_escaped_newline[\\r\\n]", "tests/config/loader/ini/test_ini_loader.py::test_ini_loader_keys", "tests/config/loader/ini/test_ini_loader.py::test_ini_loader_has_section", "tests/config/loader/ini/test_ini_loader.py::test_ini_loader_strip_comments[a\\\\#", "tests/config/loader/ini/test_ini_loader.py::test_ini_loader_repr", "tests/config/loader/ini/test_ini_loader.py::test_ini_loader_raw", "tests/config/loader/ini/test_ini_loader.py::test_ini_loader_has_no_section", "tests/config/loader/ini/test_ini_loader.py::test_ini_loader_raw_strip_escaped_newline[\\n]", "tests/session/cmd/test_show_config.py::test_pass_env_config_default[True]", "tests/session/cmd/test_show_config.py::test_show_config_pkg_env_skip", "tests/session/cmd/test_show_config.py::test_show_config_unused", "tests/session/cmd/test_show_config.py::test_show_config_commands", "tests/session/cmd/test_show_config.py::test_pass_env_config_default[False]", "tests/session/cmd/test_show_config.py::test_show_config_filter_keys", "tests/session/cmd/test_show_config.py::test_show_config_default_run_env", "tests/session/cmd/test_show_config.py::test_show_config_select_only", "tests/session/cmd/test_show_config.py::test_show_config_exception" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-01-31T09:47:26Z"
mit
tox-dev__tox-1940
diff --git a/CONTRIBUTORS b/CONTRIBUTORS index 1e085ba7..55dd6d1b 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -4,6 +4,7 @@ Alexander Loechel Alexander Schepanovski Alexandre Conrad Allan Feldman +Andrey Bienkowski Andrii Soldatenko Andrzej Klajnert Anthon van der Neuth diff --git a/docs/changelog/1921.feature.rst b/docs/changelog/1921.feature.rst new file mode 100644 index 00000000..64ea8c15 --- /dev/null +++ b/docs/changelog/1921.feature.rst @@ -0,0 +1,6 @@ +tox can now be invoked with a new ``--no-provision`` flag that prevents provision, +if :conf:`requires` or :conf:`minversion` are not satisfied, +tox will fail; +if a path is specified as an argument to the flag +(e.g. as ``tox --no-provision missing.json``) and provision is prevented, +provision metadata are written as JSON to that path - by :user:`hroncok` diff --git a/docs/config.rst b/docs/config.rst index 01bc9b7d..31e96331 100644 --- a/docs/config.rst +++ b/docs/config.rst @@ -38,6 +38,11 @@ Global settings are defined under the ``tox`` section as: than this the tool will create an environment and provision it with a version of tox that satisfies this under :conf:`provision_tox_env`. + .. versionchanged:: 3.23.0 + + When tox is invoked with the ``--no-provision`` flag, + the provision won't be attempted, tox will fail instead. + .. conf:: requires ^ LIST of PEP-508 .. versionadded:: 3.2.0 @@ -54,6 +59,11 @@ Global settings are defined under the ``tox`` section as: requires = tox-pipenv setuptools >= 30.0.0 + .. versionchanged:: 3.23.0 + + When tox is invoked with the ``--no-provision`` flag, + the provision won't be attempted, tox will fail instead. + .. conf:: provision_tox_env ^ string ^ .tox .. versionadded:: 3.8.0 @@ -61,6 +71,11 @@ Global settings are defined under the ``tox`` section as: Name of the virtual environment used to provision a tox having all dependencies specified inside :conf:`requires` and :conf:`minversion`. + .. versionchanged:: 3.23.0 + + When tox is invoked with the ``--no-provision`` flag, + the provision won't be attempted, tox will fail instead. + .. conf:: toxworkdir ^ PATH ^ {toxinidir}/.tox Directory for tox to generate its environments into, will be created if it does not exist. 
diff --git a/src/tox/config/__init__.py b/src/tox/config/__init__.py index 10bc9bef..278d4b2b 100644 --- a/src/tox/config/__init__.py +++ b/src/tox/config/__init__.py @@ -1,7 +1,9 @@ from __future__ import print_function import argparse +import io import itertools +import json import os import random import re @@ -302,7 +304,7 @@ def parseconfig(args, plugins=()): def get_py_project_toml(path): - with open(str(path)) as file_handler: + with io.open(str(path), encoding="UTF-8") as file_handler: config_data = toml.load(file_handler) return config_data @@ -572,6 +574,16 @@ def tox_addoption(parser): action="store_true", help="override alwayscopy setting to True in all envs", ) + parser.add_argument( + "--no-provision", + action="store", + nargs="?", + default=False, + const=True, + metavar="REQUIRES_JSON", + help="do not perform provision, but fail and if a path was provided " + "write provision metadata as JSON to it", + ) cli_skip_missing_interpreter(parser) parser.add_argument("--workdir", metavar="PATH", help="tox working directory") @@ -1318,8 +1330,8 @@ class ParseIni(object): # raise on unknown args self.config._parser.parse_cli(args=self.config.args, strict=True) - @staticmethod - def ensure_requires_satisfied(config, requires, min_version): + @classmethod + def ensure_requires_satisfied(cls, config, requires, min_version): missing_requirements = [] failed_to_parse = False deps = [] @@ -1346,12 +1358,33 @@ class ParseIni(object): missing_requirements.append(str(requirements.Requirement(require))) if failed_to_parse: raise tox.exception.BadRequirement() + if config.option.no_provision and missing_requirements: + msg = "provisioning explicitly disabled within {}, but missing {}" + if config.option.no_provision is not True: # it's a path + msg += " and wrote to {}" + cls.write_requires_to_json_file(config) + raise tox.exception.Error( + msg.format(sys.executable, missing_requirements, config.option.no_provision) + ) if WITHIN_PROVISION and missing_requirements: msg = "break infinite loop provisioning within {} missing {}" raise tox.exception.Error(msg.format(sys.executable, missing_requirements)) config.run_provision = bool(len(missing_requirements)) return deps + @staticmethod + def write_requires_to_json_file(config): + requires_dict = { + "minversion": config.minversion, + "requires": config.requires, + } + try: + with open(config.option.no_provision, "w", encoding="utf-8") as outfile: + json.dump(requires_dict, outfile, indent=4) + except TypeError: # Python 2 + with open(config.option.no_provision, "w") as outfile: + json.dump(requires_dict, outfile, indent=4, encoding="utf-8") + def parse_build_isolation(self, config, reader): config.isolated_build = reader.getbool("isolated_build", False) config.isolated_build_env = reader.getstring("isolated_build_env", ".package")
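A consumer-side sketch of the new flag, assuming tox is on PATH: when provisioning would be required, tox exits non-zero and, if a path was given, writes the `minversion`/`requires` metadata there as JSON (see `write_requires_to_json_file` above). This mirrors the downstream-packaging use case the flag was added for.

```python
import json
import os
import subprocess

# Hypothetical downstream usage: let tox fail instead of provisioning, then
# inspect what it would have needed to install.
proc = subprocess.run(["tox", "--no-provision", "missing.json"])
if proc.returncode != 0 and os.path.exists("missing.json"):
    with open("missing.json", encoding="utf-8") as fh:
        missing = json.load(fh)
    print("minversion:", missing["minversion"])  # e.g. "3.23.0"
    print("requires:", missing["requires"])      # e.g. ["tox-pipenv"]
```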
tox-dev/tox
a586b2a9d26c08e4dcdf4171ebae8079f5707e45
diff --git a/src/tox/_pytestplugin.py b/src/tox/_pytestplugin.py index c9176348..d0c87033 100644 --- a/src/tox/_pytestplugin.py +++ b/src/tox/_pytestplugin.py @@ -491,7 +491,14 @@ def create_files(base, filedefs): create_files(base.ensure(key, dir=1), value) elif isinstance(value, six.string_types): s = textwrap.dedent(value) - base.join(key).write(s) + + if not isinstance(s, six.text_type): + if not isinstance(s, six.binary_type): + s = str(s) + else: + s = six.ensure_text(s) + + base.join(key).write_text(s, encoding="UTF-8") @pytest.fixture() diff --git a/tests/unit/config/test_config.py b/tests/unit/config/test_config.py index fe11224c..b4bd24fb 100644 --- a/tests/unit/config/test_config.py +++ b/tests/unit/config/test_config.py @@ -1,3 +1,4 @@ +# coding=utf-8 import os import re import sys @@ -3556,7 +3557,10 @@ def test_config_via_pyproject_legacy(initproj): initproj( "config_via_pyproject_legacy-0.5", filedefs={ - "pyproject.toml": ''' + "pyproject.toml": u''' + [project] + description = "Factory ⸻ A code generator 🏭" + authors = [{name = "Łukasz Langa"}] [tool.tox] legacy_tox_ini = """ [tox] diff --git a/tests/unit/config/test_config_parallel.py b/tests/unit/config/test_config_parallel.py index 785b7710..0e42a2c5 100644 --- a/tests/unit/config/test_config_parallel.py +++ b/tests/unit/config/test_config_parallel.py @@ -38,7 +38,7 @@ def test_parallel_number_negative(newconfig, capsys): assert "value must be positive" in err -def test_depends(newconfig, capsys): +def test_depends(newconfig): config = newconfig( """\ [tox] @@ -49,7 +49,7 @@ def test_depends(newconfig, capsys): assert config.envconfigs["py"].depends == ("py37", "py36") -def test_depends_multi_row_facotr(newconfig, capsys): +def test_depends_multi_row_facotr(newconfig): config = newconfig( """\ [tox] @@ -61,7 +61,7 @@ def test_depends_multi_row_facotr(newconfig, capsys): assert config.envconfigs["py"].depends == ("py37", "py36-a", "py36-b") -def test_depends_factor(newconfig, capsys): +def test_depends_factor(newconfig): config = newconfig( """\ [tox] diff --git a/tests/unit/session/test_provision.py b/tests/unit/session/test_provision.py index cb7bd9b5..cf2ded10 100644 --- a/tests/unit/session/test_provision.py +++ b/tests/unit/session/test_provision.py @@ -1,5 +1,6 @@ from __future__ import absolute_import, unicode_literals +import json import os import shutil import subprocess @@ -185,6 +186,99 @@ def test_provision_cli_args_not_ignored_if_provision_false(cmd, initproj): result.assert_fail(is_run_test_env=False) +parametrize_json_path = pytest.mark.parametrize("json_path", [None, "missing.json"]) + + +@parametrize_json_path +def test_provision_does_not_fail_with_no_provision_no_reason(cmd, initproj, json_path): + p = initproj("test-0.1", {"tox.ini": "[tox]"}) + result = cmd("--no-provision", *([json_path] if json_path else [])) + result.assert_success(is_run_test_env=True) + assert not (p / "missing.json").exists() + + +@parametrize_json_path +def test_provision_fails_with_no_provision_next_tox(cmd, initproj, next_tox_major, json_path): + p = initproj( + "test-0.1", + { + "tox.ini": """\ + [tox] + minversion = {} + """.format( + next_tox_major, + ) + }, + ) + result = cmd("--no-provision", *([json_path] if json_path else [])) + result.assert_fail(is_run_test_env=False) + if json_path: + missing = json.loads((p / json_path).read_text("utf-8")) + assert missing["minversion"] == next_tox_major + + +@parametrize_json_path +def test_provision_fails_with_no_provision_missing_requires(cmd, initproj, json_path): + p = 
initproj( + "test-0.1", + { + "tox.ini": """\ + [tox] + requires = + virtualenv > 99999999 + """ + }, + ) + result = cmd("--no-provision", *([json_path] if json_path else [])) + result.assert_fail(is_run_test_env=False) + if json_path: + missing = json.loads((p / json_path).read_text("utf-8")) + assert missing["requires"] == ["virtualenv > 99999999"] + + +@parametrize_json_path +def test_provision_does_not_fail_with_satisfied_requires(cmd, initproj, next_tox_major, json_path): + p = initproj( + "test-0.1", + { + "tox.ini": """\ + [tox] + minversion = 0 + requires = + setuptools > 2 + pip > 3 + """ + }, + ) + result = cmd("--no-provision", *([json_path] if json_path else [])) + result.assert_success(is_run_test_env=True) + assert not (p / "missing.json").exists() + + +@parametrize_json_path +def test_provision_fails_with_no_provision_combined(cmd, initproj, next_tox_major, json_path): + p = initproj( + "test-0.1", + { + "tox.ini": """\ + [tox] + minversion = {} + requires = + setuptools > 2 + pip > 3 + """.format( + next_tox_major, + ) + }, + ) + result = cmd("--no-provision", *([json_path] if json_path else [])) + result.assert_fail(is_run_test_env=False) + if json_path: + missing = json.loads((p / json_path).read_text("utf-8")) + assert missing["minversion"] == next_tox_major + assert missing["requires"] == ["setuptools > 2", "pip > 3"] + + @pytest.fixture(scope="session") def wheel(tmp_path_factory): """create a wheel for a project"""
Possible UnicodeError caused by missing encoding="utf-8"

https://github.com/tox-dev/tox/blob/555f3f13b18da1470b6518be654e3e4e2fdec654/src/tox/config/__init__.py#L304-L306

toml must be encoded in UTF-8, so please add `encoding="utf-8"` here. It may cause a UnicodeDecodeError when pyproject.toml contains a non-ASCII character and the locale encoding is not UTF-8 (e.g. on Windows).

https://github.com/tox-dev/tox/blob/555f3f13b18da1470b6518be654e3e4e2fdec654/src/tox/util/stdlib.py#L52-L55

On Windows, stdout is UTF-8 encoded when it is a console (e.g. _WinConsoleIO), but the default encoding of TemporaryFile() is the legacy locale encoding, so UTF-8 is the better encoding for this purpose too. This is not a real bug because this function is used only here:

https://github.com/tox-dev/tox/blob/cfe38bb853ff8573817c640a158b87eaad5b6e01/src/tox/session/__init__.py#L54
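The fix is exactly what the report asks for; the patched reader (taken from the diff above) pins the encoding so the locale no longer matters:

```python
import io

import toml


def get_py_project_toml(path):
    # Read pyproject.toml as UTF-8 regardless of the locale encoding;
    # io.open keeps the encoding keyword working on Python 2 as well.
    with io.open(str(path), encoding="UTF-8") as file_handler:
        return toml.load(file_handler)
```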
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/unit/session/test_provision.py::test_provision_fails_with_no_provision_combined[missing.json]", "tests/unit/session/test_provision.py::test_provision_does_not_fail_with_no_provision_no_reason[missing.json]", "tests/unit/session/test_provision.py::test_provision_does_not_fail_with_satisfied_requires[None]", "tests/unit/session/test_provision.py::test_provision_does_not_fail_with_no_provision_no_reason[None]", "tests/unit/session/test_provision.py::test_provision_fails_with_no_provision_missing_requires[missing.json]", "tests/unit/session/test_provision.py::test_provision_does_not_fail_with_satisfied_requires[missing.json]", "tests/unit/session/test_provision.py::test_provision_fails_with_no_provision_next_tox[missing.json]" ]
[ "tests/unit/config/test_config.py::TestGetcontextname::test_hudson_legacy", "tests/unit/config/test_config.py::TestGetcontextname::test_jenkins", "tests/unit/config/test_config.py::TestGetcontextname::test_blank", "tests/unit/config/test_config.py::TestConfigPackage::test_defaults_changed_dir", "tests/unit/config/test_config.py::TestConfigPackage::test_defaults_distshare", "tests/unit/config/test_config.py::TestConfigPackage::test_project_paths", "tests/unit/config/test_config.py::TestConfigPackage::test_defaults", "tests/unit/config/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep[:]", "tests/unit/config/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep[;]", "tests/unit/config/test_config.py::TestConfigConstSubstitutions::test_dirsep_regex", "tests/unit/config/test_config.py::TestConfigConstSubstitutions::test_dirsep_replace[\\\\\\\\]", "tests/unit/config/test_config.py::TestConfigConstSubstitutions::test_pathsep_regex", "tests/unit/config/test_config.py::TestConfigConstSubstitutions::test_dirsep_replace[\\\\]", "tests/unit/config/test_config.py::TestVenvConfig::test_force_dep_with_url", "tests/unit/config/test_config.py::TestVenvConfig::test_envdir_set_manually", "tests/unit/config/test_config.py::TestVenvConfig::test_suicide_interrupt_terminate_timeout_set_manually", "tests/unit/config/test_config.py::TestVenvConfig::test_envdir_set_manually_setup_cfg", "tests/unit/config/test_config.py::TestVenvConfig::test_process_deps", "tests/unit/config/test_config.py::TestVenvConfig::test_config_parsing_multienv", "tests/unit/config/test_config.py::TestVenvConfig::test_envdir_set_manually_with_substitutions", "tests/unit/config/test_config.py::TestVenvConfig::test_force_dep_version", "tests/unit/config/test_config.py::TestVenvConfig::test_config_parsing_minimal", "tests/unit/config/test_config.py::TestVenvConfig::test_is_same_dep", "tests/unit/config/test_config.py::TestGlobalOptions::test_no_implicit_venv_from_cli_with_envlist", "tests/unit/config/test_config.py::TestGlobalOptions::test_notest", "tests/unit/config/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_false", "tests/unit/config/test_config.py::TestGlobalOptions::test_sdist_specification", "tests/unit/config/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_overrides_true", "tests/unit/config/test_config.py::TestGlobalOptions::test_quiet[args2-2]", "tests/unit/config/test_config.py::TestGlobalOptions::test_quiet[args0-0]", "tests/unit/config/test_config.py::TestGlobalOptions::test_envlist_multiline", "tests/unit/config/test_config.py::TestGlobalOptions::test_env_selection_expanded_envlist", "tests/unit/config/test_config.py::TestGlobalOptions::test_substitution_jenkins_default", "tests/unit/config/test_config.py::TestGlobalOptions::test_py_venv", "tests/unit/config/test_config.py::TestGlobalOptions::test_verbosity[args3-2]", "tests/unit/config/test_config.py::TestGlobalOptions::test_correct_basepython_chosen_from_default_factors", "tests/unit/config/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_true", "tests/unit/config/test_config.py::TestGlobalOptions::test_quiet[args1-1]", "tests/unit/config/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_not_specified", "tests/unit/config/test_config.py::TestGlobalOptions::test_defaultenv_commandline", "tests/unit/config/test_config.py::TestGlobalOptions::test_envlist_expansion", "tests/unit/config/test_config.py::TestGlobalOptions::test_substitution_jenkins_context", 
"tests/unit/config/test_config.py::TestGlobalOptions::test_verbosity[args2-2]", "tests/unit/config/test_config.py::TestGlobalOptions::test_verbosity[args0-0]", "tests/unit/config/test_config.py::TestGlobalOptions::test_defaultenv_partial_override", "tests/unit/config/test_config.py::TestGlobalOptions::test_envlist_cross_product", "tests/unit/config/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_no_arg", "tests/unit/config/test_config.py::TestGlobalOptions::test_quiet[args3-2]", "tests/unit/config/test_config.py::TestGlobalOptions::test_quiet[args4-3]", "tests/unit/config/test_config.py::TestGlobalOptions::test_verbosity[args4-3]", "tests/unit/config/test_config.py::TestGlobalOptions::test_env_selection_with_section_name", "tests/unit/config/test_config.py::TestGlobalOptions::test_substitution_jenkins_global", "tests/unit/config/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_overrides_false", "tests/unit/config/test_config.py::TestGlobalOptions::test_verbosity[args1-1]", "tests/unit/config/test_config.py::TestIniParserPrefix::test_other_section_substitution", "tests/unit/config/test_config.py::TestIniParserPrefix::test_fallback_sections", "tests/unit/config/test_config.py::TestIniParserPrefix::test_basic_section_access", "tests/unit/config/test_config.py::TestIniParserPrefix::test_value_matches_prefixed_section_substitution", "tests/unit/config/test_config.py::TestIniParserPrefix::test_value_doesn_match_prefixed_section_substitution", "tests/unit/config/test_config.py::TestConfigTestEnv::test_recursive_substitution_cycle_fails", "tests/unit/config/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_section", "tests/unit/config/test_config.py::TestConfigTestEnv::test_factors_in_boolean", "tests/unit/config/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[linux2]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_default_factors_conflict_lying_name", "tests/unit/config/test_config.py::TestConfigTestEnv::test_factor_ops", "tests/unit/config/test_config.py::TestConfigTestEnv::test_factors_in_setenv", "tests/unit/config/test_config.py::TestConfigTestEnv::test_sitepackages_switch", "tests/unit/config/test_config.py::TestConfigTestEnv::test_install_command_setting", "tests/unit/config/test_config.py::TestConfigTestEnv::test_multilevel_substitution", "tests/unit/config/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist1-deps1]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_rewrite_simple_posargs", "tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_positional", "tests/unit/config/test_config.py::TestConfigTestEnv::test_install_command_substitutions", "tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_defaults", "tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_noargs_issue240", "tests/unit/config/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy3]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_envbindir_jython[jython]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_defaults", "tests/unit/config/test_config.py::TestConfigTestEnv::test_period_in_factor", "tests/unit/config/test_config.py::TestConfigTestEnv::test_ignore_errors", "tests/unit/config/test_config.py::TestConfigTestEnv::test_specific_command_overrides", "tests/unit/config/test_config.py::TestConfigTestEnv::test_factors", "tests/unit/config/test_config.py::TestConfigTestEnv::test_rewrite_posargs", 
"tests/unit/config/test_config.py::TestConfigTestEnv::test_factors_groups_touch", "tests/unit/config/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[win32]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_posargs_backslashed_or_quoted", "tests/unit/config/test_config.py::TestConfigTestEnv::test_envconfigs_based_on_factors", "tests/unit/config/test_config.py::TestConfigTestEnv::test_allowlist_externals", "tests/unit/config/test_config.py::TestConfigTestEnv::test_commentchars_issue33", "tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue515", "tests/unit/config/test_config.py::TestConfigTestEnv::test_regression_test_issue_706[envlist0]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_passenv_glob_from_global_env", "tests/unit/config/test_config.py::TestConfigTestEnv::test_changedir", "tests/unit/config/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[linux2]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_no_spinner", "tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_nested_env_defaults", "tests/unit/config/test_config.py::TestConfigTestEnv::test_envbindir", "tests/unit/config/test_config.py::TestConfigTestEnv::test_do_not_substitute_more_than_needed", "tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue246", "tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_double", "tests/unit/config/test_config.py::TestConfigTestEnv::test_install_command_must_contain_packages", "tests/unit/config/test_config.py::TestConfigTestEnv::test_default_factors", "tests/unit/config/test_config.py::TestConfigTestEnv::test_install_command_substitutions_other_section", "tests/unit/config/test_config.py::TestConfigTestEnv::test_passenv_from_global_env", "tests/unit/config/test_config.py::TestConfigTestEnv::test_installpkg_tops_develop", "tests/unit/config/test_config.py::TestConfigTestEnv::test_single_value_from_other_secton", "tests/unit/config/test_config.py::TestConfigTestEnv::test_changedir_override", "tests/unit/config/test_config.py::TestConfigTestEnv::test_factor_use_not_checked", "tests/unit/config/test_config.py::TestConfigTestEnv::test_pip_pre_cmdline_override", "tests/unit/config/test_config.py::TestConfigTestEnv::test_factors_support_curly_braces", "tests/unit/config/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_default_factors_conflict", "tests/unit/config/test_config.py::TestConfigTestEnv::test_simple", "tests/unit/config/test_config.py::TestConfigTestEnv::test_pip_pre", "tests/unit/config/test_config.py::TestConfigTestEnv::test_factor_expansion", "tests/unit/config/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[win32]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_passenv_with_factor", "tests/unit/config/test_config.py::TestConfigTestEnv::test_ignore_outcome", "tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_error", "tests/unit/config/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist0-deps0]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_curly_braces_in_setenv", "tests/unit/config/test_config.py::TestCommandParser::test_command_parser_for_substitution_with_spaces", "tests/unit/config/test_config.py::TestCommandParser::test_command_with_split_line_in_subst_arguments", 
"tests/unit/config/test_config.py::TestCommandParser::test_command_parser_with_complex_word_set", "tests/unit/config/test_config.py::TestCommandParser::test_command_parsing_for_issue_10", "tests/unit/config/test_config.py::TestCommandParser::test_command_parser_for_multiple_words", "tests/unit/config/test_config.py::TestCommandParser::test_commands_with_backslash", "tests/unit/config/test_config.py::TestCommandParser::test_command_with_runs_of_whitespace", "tests/unit/config/test_config.py::TestCommandParser::test_command_parser_for_word", "tests/unit/config/test_config.py::TestCommandParser::test_command_parser_for_posargs", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_command_contains_hash", "tests/unit/config/test_config.py::TestIniParser::test_missing_env_sub_populates_missing_subs", "tests/unit/config/test_config.py::TestIniParser::test_getbool", "tests/unit/config/test_config.py::TestIniParser::test_value_doesn_match_section_substitution", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_quoting_in_command", "tests/unit/config/test_config.py::TestIniParser::test_getpath", "tests/unit/config/test_config.py::TestIniParser::test_expand_section_name", "tests/unit/config/test_config.py::TestIniParser::test_substitution_empty", "tests/unit/config/test_config.py::TestIniParser::test_getstring_other_section_substitution", "tests/unit/config/test_config.py::TestIniParser::test_missing_env_sub_raises_config_error_in_non_testenv", "tests/unit/config/test_config.py::TestIniParser::test_getstring_single", "tests/unit/config/test_config.py::TestIniParser::test_substitution_with_multiple_words", "tests/unit/config/test_config.py::TestIniParser::test_getlist", "tests/unit/config/test_config.py::TestIniParser::test_positional_arguments_are_only_replaced_when_standing_alone", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_multiline", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_positional_substitution", "tests/unit/config/test_config.py::TestIniParser::test_value_matches_section_substitution", "tests/unit/config/test_config.py::TestIniParser::test_getargv", "tests/unit/config/test_config.py::TestIniParser::test_getdict", "tests/unit/config/test_config.py::TestIniParser::test_posargs_are_added_escaped_issue310", "tests/unit/config/test_config.py::TestIniParser::test_substitution_colon_prefix", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_posargs_with_quotes", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_quoted_posargs", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_windows_escaping", "tests/unit/config/test_config.py::TestIniParser::test_getstring_substitution", "tests/unit/config/test_config.py::TestIniParser::test_normal_env_sub_works", "tests/unit/config/test_config.py::TestIniParser::test_missing_substitution", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_comment_after_command", "tests/unit/config/test_config.py::TestIniParser::test_getstring_fallback_sections", "tests/unit/config/test_config.py::TestIniParser::test_argvlist", "tests/unit/config/test_config.py::TestIniParser::test_getstring_environment_substitution_with_default", "tests/unit/config/test_config.py::TestCmdInvocation::test_no_tox_ini", "tests/unit/config/test_config.py::TestCmdInvocation::test_version_no_plugins", "tests/unit/config/test_config.py::TestCmdInvocation::test_version_simple", "tests/unit/config/test_config.py::TestCmdInvocation::test_help", 
"tests/unit/config/test_config.py::TestCmdInvocation::test_version_with_normal_plugin", "tests/unit/config/test_config.py::TestCmdInvocation::test_version_with_fileless_module", "tests/unit/config/test_config.py::TestHashseedOption::test_setenv", "tests/unit/config/test_config.py::TestHashseedOption::test_noset", "tests/unit/config/test_config.py::TestHashseedOption::test_passing_integer", "tests/unit/config/test_config.py::TestHashseedOption::test_noset_with_setenv", "tests/unit/config/test_config.py::TestHashseedOption::test_passing_string", "tests/unit/config/test_config.py::TestHashseedOption::test_one_random_hashseed", "tests/unit/config/test_config.py::TestHashseedOption::test_default", "tests/unit/config/test_config.py::TestHashseedOption::test_passing_empty_string", "tests/unit/config/test_config.py::TestHashseedOption::test_setenv_in_one_testenv", "tests/unit/config/test_config.py::TestHashseedOption::test_passing_no_argument", "tests/unit/config/test_config.py::test_get_homedir", "tests/unit/config/test_config.py::test_env_spec[-e", "tests/unit/config/test_config.py::test_config_no_version_data_in__name", "tests/unit/config/test_config.py::test_config_bad_pyproject_specified", "tests/unit/config/test_config.py::test_interactive_available", "tests/unit/config/test_config.py::test_config_via_pyproject_legacy", "tests/unit/config/test_config.py::test_provision_tox_env_cannot_be_in_envlist", "tests/unit/config/test_config.py::test_posargs_relative_changedir", "tests/unit/config/test_config.py::test_overwrite_skip_install_override", "tests/unit/config/test_config.py::test_interactive", "tests/unit/config/test_config.py::test_config_bad_config_type_specified", "tests/unit/config/test_config.py::test_config_file_not_required_with_devenv", "tests/unit/config/test_config.py::test_config_current_py", "tests/unit/config/test_config.py::test_isolated_build_ignores[deps-crazy-default0]", "tests/unit/config/test_config.py::test_interactive_na", "tests/unit/config/test_config.py::test_isolated_build_overrides", "tests/unit/config/test_config.py::test_config_setup_cfg_no_tox_section", "tests/unit/config/test_config.py::test_isolated_build_env_cannot_be_in_envlist", "tests/unit/config/test_config.py::test_isolated_build_ignores[sitepackages-True-False]", "tests/unit/config/test_config.py::TestParseconfig::test_search_parents", "tests/unit/config/test_config.py::TestParseconfig::test_workdir_gets_resolved", "tests/unit/config/test_config.py::TestParseconfig::test_explicit_config_path", "tests/unit/config/test_config.py::TestParseEnv::test_parse_recreate", "tests/unit/config/test_config.py::TestIndexServer::test_indexserver", "tests/unit/config/test_config.py::TestIndexServer::test_multiple_homedir_relative_local_indexservers", "tests/unit/config/test_config.py::TestIndexServer::test_parse_indexserver", "tests/unit/config/test_config.py::TestConfigPlatform::test_platform_install_command", "tests/unit/config/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[lin]", "tests/unit/config/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[osx]", "tests/unit/config/test_config.py::TestConfigPlatform::test_config_parse_platform_rex", "tests/unit/config/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[win]", "tests/unit/config/test_config.py::TestConfigPlatform::test_config_parse_platform", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section", 
"tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_section_and_posargs_substitution", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_posargs_with_spaced_colon", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_multiline", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_posargs", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_global", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_missing_substitution_multi_env", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_recursion_error_same_section", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_missing_substitution_setenv", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_missing_substitution_simple", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_recursion_error_other_section", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_posargs_with_colon", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_missing_substitution_other_section", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_posargs", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_recursion_error_unnecessary", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_missing_substitution_complex", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_regression_issue595", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_missing_substitution_inherit", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_posargs_with_colon", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_default_escape", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_missing_substitution", "tests/unit/config/test_config.py::TestSetenv::test_setenv_overrides", "tests/unit/config/test_config.py::TestSetenv::test_setenv_cross_section_subst_twice", "tests/unit/config/test_config.py::TestSetenv::test_setenv_ordering_1", "tests/unit/config/test_config.py::TestSetenv::test_setenv_recursive_direct_without_default", "tests/unit/config/test_config.py::TestSetenv::test_setenv_uses_os_environ", "tests/unit/config/test_config.py::TestSetenv::test_setenv_comment", "tests/unit/config/test_config.py::TestSetenv::test_getdict_lazy", "tests/unit/config/test_config.py::TestSetenv::test_setenv_env_file[None-False]", "tests/unit/config/test_config.py::TestSetenv::test_setenv_recursive_direct_with_default", "tests/unit/config/test_config.py::TestSetenv::test_setenv_env_file[\\nMAGIC", "tests/unit/config/test_config.py::TestSetenv::test_setenv_uses_other_setenv", "tests/unit/config/test_config.py::TestSetenv::test_setenv_default_os_environ", "tests/unit/config/test_config.py::TestSetenv::test_setenv_recursive_direct_with_default_nested", "tests/unit/config/test_config.py::TestSetenv::test_setenv_env_file[MAGIC=yes-True]", 
"tests/unit/config/test_config.py::TestSetenv::test_setenv_env_file[\\n-False]", "tests/unit/config/test_config.py::TestSetenv::test_setenv_env_file[#MAGIC", "tests/unit/config/test_config.py::TestSetenv::test_setenv_cross_section_mixed", "tests/unit/config/test_config.py::TestSetenv::test_setenv_with_envdir_and_basepython", "tests/unit/config/test_config.py::TestSetenv::test_getdict_lazy_update", "tests/unit/config/test_config.py::TestSetenv::test_setenv_cross_section_subst_issue294", "tests/unit/session/test_provision.py::test_provision_fails_with_no_provision_missing_requires[None]", "tests/unit/session/test_provision.py::test_provision_basepython_local", "tests/unit/session/test_provision.py::test_provision_config_empty_minversion_and_requires", "tests/unit/session/test_provision.py::test_provision_fails_with_no_provision_next_tox[None]", "tests/unit/session/test_provision.py::test_provision_cli_args_not_ignored_if_provision_false", "tests/unit/session/test_provision.py::test_provision_requirement_with_environment_marker", "tests/unit/session/test_provision.py::test_provision_cli_args_ignore", "tests/unit/session/test_provision.py::test_provision_non_canonical_dep", "tests/unit/session/test_provision.py::test_provision_config_has_minversion_and_requires", "tests/unit/session/test_provision.py::test_provision_basepython_global_only", "tests/unit/session/test_provision.py::test_provision_tox_change_name", "tests/unit/session/test_provision.py::test_provision_bad_requires", "tests/unit/session/test_provision.py::test_provision_fails_with_no_provision_combined[None]", "tests/unit/session/test_provision.py::test_provision_min_version_is_requires", "tests/unit/config/test_config_parallel.py::test_parallel_number_negative", "tests/unit/config/test_config_parallel.py::test_depends", "tests/unit/config/test_config_parallel.py::test_parallel_all", "tests/unit/config/test_config_parallel.py::test_parallel_number", "tests/unit/config/test_config_parallel.py::test_depends_multi_row_facotr", "tests/unit/config/test_config_parallel.py::test_depends_factor", "tests/unit/config/test_config_parallel.py::test_parallel_live_on", "tests/unit/config/test_config_parallel.py::test_parallel_auto", "tests/unit/config/test_config_parallel.py::test_parallel_default" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-03-01T23:04:15Z"
mit
tox-dev__tox-1954
diff --git a/docs/changelog/1928.feature.rst b/docs/changelog/1928.feature.rst new file mode 100644 index 00000000..1f53b09b --- /dev/null +++ b/docs/changelog/1928.feature.rst @@ -0,0 +1,2 @@ +Implemented ``[]`` substitution (alias for ``{posargs}``) - by +:user:`hexagonrecursion`. diff --git a/src/tox/config/loader/ini/replace.py b/src/tox/config/loader/ini/replace.py index 91d3c032..505519ce 100644 --- a/src/tox/config/loader/ini/replace.py +++ b/src/tox/config/loader/ini/replace.py @@ -26,10 +26,9 @@ def replace(conf: "Config", name: Optional[str], loader: "IniLoader", value: str # perform all non-escaped replaces start, end = 0, 0 while True: - start, end, match = find_replace_part(value, start, end) - if not match: + start, end, to_replace = find_replace_part(value, start, end) + if to_replace is None: break - to_replace = value[start + 1 : end] replaced = _replace_match(conf, name, loader, to_replace, chain.copy()) if replaced is None: # if we cannot replace, keep what was there, and continue looking for additional replaces following @@ -38,17 +37,39 @@ def replace(conf: "Config", name: Optional[str], loader: "IniLoader", value: str start = end = end + 1 continue new_value = value[:start] + replaced + value[end + 1 :] - start, end = 0, 0 # if we performed a replace start over + start, end = 0, 0 # if we performed a replacement start over if new_value == value: # if we're not making progress stop (circular reference?) break value = new_value # remove escape sequences value = value.replace("\\{", "{") value = value.replace("\\}", "}") + value = value.replace("\\[", "[") + value = value.replace("\\]", "]") return value -def find_replace_part(value: str, start: int, end: int) -> Tuple[int, int, bool]: +def find_replace_part(value: str, start: int, end: int) -> Tuple[int, int, Optional[str]]: + bracket_at = find_brackets(value, end) + if bracket_at != -1: + return bracket_at, bracket_at + 1, "posargs" # brackets is an alias for positional arguments + start, end, match = find_braces(value, start, end) + return start, end, (value[start + 1 : end] if match else None) + + +def find_brackets(value: str, end: int) -> int: + while True: + pos = value.find("[]", end) + if pos == -1: + break + if pos >= 1 and value[pos - 1] == "\\": # the opened bracket is escaped + end = pos + 1 + continue + break + return pos + + +def find_braces(value: str, start: int, end: int) -> Tuple[int, int, bool]: match = False while end != -1: end = value.find("}", end)
tox-dev/tox
11b150eaa0b43461910847092afa49a083851b98
diff --git a/tests/config/loader/ini/replace/test_replace_posargs.py b/tests/config/loader/ini/replace/test_replace_posargs.py index 20f4091f..d2ebed4b 100644 --- a/tests/config/loader/ini/replace/test_replace_posargs.py +++ b/tests/config/loader/ini/replace/test_replace_posargs.py @@ -5,23 +5,27 @@ import pytest from tests.config.loader.ini.replace.conftest import ReplaceOne -def test_replace_pos_args_none_sys_argv(replace_one: ReplaceOne) -> None: - result = replace_one("{posargs}", None) [email protected]("syntax", ["{posargs}", "[]"]) +def test_replace_pos_args_none_sys_argv(syntax: str, replace_one: ReplaceOne) -> None: + result = replace_one(syntax, None) assert result == "" -def test_replace_pos_args_empty_sys_argv(replace_one: ReplaceOne) -> None: - result = replace_one("{posargs}", []) [email protected]("syntax", ["{posargs}", "[]"]) +def test_replace_pos_args_empty_sys_argv(syntax: str, replace_one: ReplaceOne) -> None: + result = replace_one(syntax, []) assert result == "" -def test_replace_pos_args_extra_sys_argv(replace_one: ReplaceOne) -> None: - result = replace_one("{posargs}", [sys.executable, "magic"]) [email protected]("syntax", ["{posargs}", "[]"]) +def test_replace_pos_args_extra_sys_argv(syntax: str, replace_one: ReplaceOne) -> None: + result = replace_one(syntax, [sys.executable, "magic"]) assert result == f"{sys.executable} magic" -def test_replace_pos_args(replace_one: ReplaceOne) -> None: - result = replace_one("{posargs}", ["ok", "what", " yes "]) [email protected]("syntax", ["{posargs}", "[]"]) +def test_replace_pos_args(syntax: str, replace_one: ReplaceOne) -> None: + result = replace_one(syntax, ["ok", "what", " yes "]) quote = '"' if sys.platform == "win32" else "'" assert result == f"ok what {quote} yes {quote}" @@ -31,8 +35,8 @@ def test_replace_pos_args(replace_one: ReplaceOne) -> None: [ ("magic", "magic"), ("magic:colon", "magic:colon"), - ("magic\n b:c", "magic\nb:c"), # unescaped newline keeps the newline - ("magi\\\n c:d", "magic:d"), # escaped newline merges the lines + ("magic\n b:c", "magic\nb:c"), # an unescaped newline keeps the newline + ("magi\\\n c:d", "magic:d"), # an escaped newline merges the lines ("\\{a\\}", "{a}"), # escaped curly braces ], ) @@ -50,9 +54,24 @@ def test_replace_pos_args_default(replace_one: ReplaceOne, value: str, result: s "\\{posargs\\}", "{\\{posargs}", "{\\{posargs}{}", + "\\[]", + "[\\]", + "\\[\\]", ], ) def test_replace_pos_args_escaped(replace_one: ReplaceOne, value: str) -> None: result = replace_one(value, None) - outcome = value.replace("\\{", "{").replace("\\}", "}") + outcome = value.replace("\\", "") + assert result == outcome + + [email protected]( + ("value", "result"), + [ + ("[]-{posargs}", "foo-foo"), + ("{posargs}-[]", "foo-foo"), + ], +) +def test_replace_mixed_brackets_and_braces(replace_one: ReplaceOne, value: str, result: str) -> None: + outcome = replace_one(value, ["foo"]) assert result == outcome
tox 4 bracket posargs are missing

As an example, I cut down the apipkg tox.ini to just

```
[testenv]
deps=pytest
commands=pytest []
```

ran

```
$ tox4
py: install_deps> python -I -m pip install pytest
.pkg: get_requires_for_build_wheel> python /home/rpfannsc/.local/lib/python3.9/site-packages/tox/util/pep517/backend.py True setuptools.build_meta __legacy__
.pkg: prepare_metadata_for_build_wheel> python /home/rpfannsc/.local/lib/python3.9/site-packages/tox/util/pep517/backend.py True setuptools.build_meta __legacy__
.pkg: get_requires_for_build_sdist> python /home/rpfannsc/.local/lib/python3.9/site-packages/tox/util/pep517/backend.py True setuptools.build_meta __legacy__
.pkg: build_sdist> python /home/rpfannsc/.local/lib/python3.9/site-packages/tox/util/pep517/backend.py True setuptools.build_meta __legacy__
py: install_package> python -I -m pip install --no-deps --force-reinstall /home/rpfannsc/Projects/pytest-dev/apipkg/.tox/4/.pkg/dist/apipkg-1.6.dev16+ge54efbf.d20210227.tar.gz
py: commands[0]> pytest '[]'
ERROR: file or directory not found: []
========================================================================================= test session starts ==========================================================================================
platform linux -- Python 3.9.1, pytest-6.2.2, py-1.10.0, pluggy-0.13.1
apipkg full install version=1.6.dev16+ge54efbf.d20210227
rootdir: /home/rpfannsc/Projects/pytest-dev/apipkg
collected 0 items

======================================================================================== no tests ran in 0.00s =========================================================================================
py: exit 4 (0.34 seconds) /home/rpfannsc/Projects/pytest-dev/apipkg> pytest '[]' pid=1183180
.pkg: _exit> python /home/rpfannsc/.local/lib/python3.9/site-packages/tox/util/pep517/backend.py True setuptools.build_meta __legacy__
py: FAIL code 4 (9.90=setup[9.57]+cmd[0.34] seconds)
evaluation failed :( (10.00 seconds)
```

and ran

```
$ tox
GLOB sdist-make: /home/rpfannsc/Projects/pytest-dev/apipkg/setup.py
python create: /home/rpfannsc/Projects/pytest-dev/apipkg/.tox/python
python installdeps: pytest
python inst: /home/rpfannsc/Projects/pytest-dev/apipkg/.tox/.tmp/package/1/apipkg-1.6.dev16+ge54efbf.d20210227.zip
python installed: apipkg @ file:///home/rpfannsc/Projects/pytest-dev/apipkg/.tox/.tmp/package/1/apipkg-1.6.dev16%2Bge54efbf.d20210227.zip,attrs==20.3.0,iniconfig==1.1.1,packaging==20.9,pluggy==0.13.1,py==1.10.0,pyparsing==2.4.7,pytest==6.2.2,toml==0.10.2
python run-test-pre: PYTHONHASHSEED='1155200767'
python run-test: commands[0] | pytest
========================================================================================= test session starts ==========================================================================================
platform linux -- Python 3.9.1, pytest-6.2.2, py-1.10.0, pluggy-0.13.1
cachedir: .tox/python/.pytest_cache
apipkg full install version=1.6.dev16+ge54efbf.d20210227
rootdir: /home/rpfannsc/Projects/pytest-dev/apipkg
collected 42 items

test_apipkg.py .......................................... [100%]

========================================================================================== 42 passed in 0.63s ==========================================================================================
_______________________________________________________________________________________________ summary ________________________________________________________________________________________________
python: commands succeeded
congratulations :)
```

and then ran

```
$ tox -- -k aliasmod
GLOB sdist-make: /home/rpfannsc/Projects/pytest-dev/apipkg/setup.py
python inst-nodeps: /home/rpfannsc/Projects/pytest-dev/apipkg/.tox/.tmp/package/1/apipkg-1.6.dev16+ge54efbf.d20210227.zip
python installed: apipkg @ file:///home/rpfannsc/Projects/pytest-dev/apipkg/.tox/.tmp/package/1/apipkg-1.6.dev16%2Bge54efbf.d20210227.zip,attrs==20.3.0,iniconfig==1.1.1,packaging==20.9,pluggy==0.13.1,py==1.10.0,pyparsing==2.4.7,pytest==6.2.2,toml==0.10.2
python run-test-pre: PYTHONHASHSEED='1148858278'
python run-test: commands[0] | pytest -k aliasmod
========================================================================================= test session starts ==========================================================================================
platform linux -- Python 3.9.1, pytest-6.2.2, py-1.10.0, pluggy-0.13.1
cachedir: .tox/python/.pytest_cache
apipkg full install version=1.6.dev16+ge54efbf.d20210227
rootdir: /home/rpfannsc/Projects/pytest-dev/apipkg
collected 42 items / 33 deselected / 9 selected

test_apipkg.py ......... [100%]

=================================================================================== 9 passed, 33 deselected in 0.19s ===================================================================================
_______________________________________________________________________________________________ summary ________________________________________________________________________________________________
python: commands succeeded
congratulations :)
```
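tox 3 treats `[]` in a command as an alias for `{posargs}`, which tox 4 initially only recognized in its braced form; the patch above adds the bracket scan. A simplified re-implementation of that scan, mirroring `find_brackets` from the patch (illustrative, not the exact function):

```python
def find_brackets(value, end=0):
    # Find the first unescaped "[]" from position `end`; "\[]" is skipped so
    # a literal bracket pair can still be passed through.
    while True:
        pos = value.find("[]", end)
        if pos == -1:
            return -1
        if pos >= 1 and value[pos - 1] == "\\":  # escaped, keep scanning
            end = pos + 1
            continue
        return pos


assert find_brackets("pytest []") == 7   # replaced with the posargs
assert find_brackets("pytest \\[]") == -1  # escaped, left as a literal
```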
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_none_sys_argv[[]]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_empty_sys_argv[[]]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_extra_sys_argv[[]]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args[[]]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_escaped[\\\\[]]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_escaped[[\\\\]]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_escaped[\\\\[\\\\]]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_mixed_brackets_and_braces[[]-{posargs}-foo-foo]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_mixed_brackets_and_braces[{posargs}-[]-foo-foo]" ]
[ "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_none_sys_argv[{posargs}]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_empty_sys_argv[{posargs}]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_extra_sys_argv[{posargs}]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args[{posargs}]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_default[magic-magic]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_default[magic:colon-magic:colon]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_default[magic\\n", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_default[magi\\\\\\n", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_default[\\\\{a\\\\}-{a}]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_escaped[\\\\{posargs}]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_escaped[{posargs\\\\}]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_escaped[\\\\{posargs\\\\}]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_escaped[{\\\\{posargs}]", "tests/config/loader/ini/replace/test_replace_posargs.py::test_replace_pos_args_escaped[{\\\\{posargs}{}]" ]
{ "failed_lite_validators": [ "has_added_files" ], "has_test_patch": true, "is_lite": false }
"2021-03-03T12:20:42Z"
mit
tox-dev__tox-2131
diff --git a/CONTRIBUTORS b/CONTRIBUTORS index e794efc5..14d9bbad 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -99,6 +99,7 @@ Philip Thiem Pierre-Jean Campigotto Pierre-Luc Tessier GagnΓ© Prakhar Gurunani +Rahul Bangar Ronald Evers Ronny Pfannschmidt Ryuichi Ohori diff --git a/docs/changelog/2130.bugfix.rst b/docs/changelog/2130.bugfix.rst new file mode 100644 index 00000000..aac4524c --- /dev/null +++ b/docs/changelog/2130.bugfix.rst @@ -0,0 +1,1 @@ +``get_requires_for_build_sdist`` hook (PEP 517) is assumed to return an empty list if left unimplemented by the backend build system - by :user:`oczkoisse` diff --git a/src/tox/helper/build_requires.py b/src/tox/helper/build_requires.py index aafb258c..a91671c0 100644 --- a/src/tox/helper/build_requires.py +++ b/src/tox/helper/build_requires.py @@ -12,6 +12,13 @@ backend = __import__(backend_spec, fromlist=["_trash"]) if backend_obj: backend = getattr(backend, backend_obj) -for_build_requires = backend.get_requires_for_build_sdist(None) +try: + for_build_requires = backend.get_requires_for_build_sdist(None) +except AttributeError: + # PEP 517 states that get_requires_for_build_sdist is optional for a build + # backend object. When the backend object omits it, the default + # implementation must be equivalent to return [] + for_build_requires = [] + output = json.dumps(for_build_requires) print(output)
tox-dev/tox
a61a6b6bf8713f35e560a2449480a8ea5721bad7
diff --git a/tests/unit/package/builder/test_package_builder_isolated.py b/tests/unit/package/builder/test_package_builder_isolated.py index dd783d85..f05d8ae8 100644 --- a/tests/unit/package/builder/test_package_builder_isolated.py +++ b/tests/unit/package/builder/test_package_builder_isolated.py @@ -202,3 +202,71 @@ def test_isolated_build_script_args(tmp_path): # cannot import build_isolated because of its side effects script_path = os.path.join(os.path.dirname(tox.helper.__file__), "build_isolated.py") subprocess.check_call(("python", script_path, str(tmp_path), "setuptools.build_meta")) + + +def test_isolated_build_backend_missing_hook(initproj, cmd): + """Verify that tox works with a backend missing optional hooks + + PEP 517 allows backends to omit get_requires_for_build_sdist hook, in which + case a default implementation that returns an empty list should be assumed + instead of raising an error. + """ + name = "ensconsproj" + version = "0.1" + src_root = "src" + + initproj( + (name, version), + filedefs={ + "pyproject.toml": """ + [build-system] + requires = ["pytoml>=0.1", "enscons==0.26.0"] + build-backend = "enscons.api" + + [tool.enscons] + name = "{name}" + version = "{version}" + description = "Example enscons project" + license = "MIT" + packages = ["{name}"] + src_root = "{src_root}" + """.format( + name=name, version=version, src_root=src_root + ), + "tox.ini": """ + [tox] + isolated_build = true + """, + "SConstruct": """ + import enscons + + env = Environment( + tools=["default", "packaging", enscons.generate], + PACKAGE_METADATA=dict( + name = "{name}", + version = "{version}" + ), + WHEEL_TAG="py2.py3-none-any" + ) + + py_source = env.Glob("src/{name}/*.py") + + purelib = env.Whl("purelib", py_source, root="{src_root}") + whl = env.WhlFile(purelib) + + sdist = env.SDist(source=FindSourceFiles() + ["PKG-INFO"]) + env.NoClean(sdist) + env.Alias("sdist", sdist) + + develop = env.Command("#DEVELOP", enscons.egg_info_targets(env), enscons.develop) + env.Alias("develop", develop) + + env.Default(whl, sdist) + """.format( + name=name, version=version, src_root=src_root + ), + }, + ) + + result = cmd("--sdistonly", "-v", "-v", "-e", "py") + assert "scons: done building targets" in result.out, result.out
Isolated build with 'enscons' as backend build system fails

I'm trying to use `tox` with `isolated_build` set to `true` and [`enscons`](https://github.com/dholth/enscons) as the backend build system. When `isolated_build` is not set (default is `false` I think), `tox` works fine. However, when `isolated_build` is set to `true`, the build fails with the following error:

```
action: .package, msg: get-build-requires
cwd: C:\Users\banga\Projects\Personal\labeling-tool
cmd: 'C:\Users\banga\Projects\Personal\labeling-tool\.tox\.package\Scripts\python' '.venv\Lib\site-packages\tox\helper\build_requires.py' enscons.api '' ''
Traceback (most recent call last):
  File ".venv\Lib\site-packages\tox\helper\build_requires.py", line 15, in <module>
    for_build_requires = backend.get_requires_for_build_sdist(None)
AttributeError: module 'enscons.api' has no attribute 'get_requires_for_build_sdist'
```

I might be wrong, but I thought `get_requires_for_build_sdist` and `get_requires_for_build_wheel` were optional hooks in PEP 517 and should be assumed to return an empty list if absent.

The tox configuration in `pyproject.toml` is:

```toml
[tool.tox]
legacy_tox_ini = """
[tox]
envlist = py37, py38, py39
isolated_build = true

[testenv]
deps = pytest
commands = pytest
"""
```
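The patch above resolves this by treating the hook as optional. A minimal standalone sketch of the same tolerant call (assumes enscons is installed; the import style mirrors tox's helper script):

```python
# Call the optional PEP 517 hook, falling back to the mandated default
# behaviour (an empty requirements list) when the backend omits it.
backend = __import__("enscons.api", fromlist=["_trash"])
try:
    for_build_requires = backend.get_requires_for_build_sdist(None)
except AttributeError:
    for_build_requires = []  # PEP 517: default implementation returns []
print(for_build_requires)
```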
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/unit/package/builder/test_package_builder_isolated.py::test_isolated_build_backend_missing_hook" ]
[ "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_bad_requires", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_no_pyproject_toml", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_no_requires", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_bad_backend", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_backend_path_outside_root", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_no_build_system", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_no_backend", "tests/unit/package/builder/test_package_builder_isolated.py::test_package_isolated_toml_bad_backend_path" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2021-07-31T05:56:48Z"
mit
tox-dev__tox-2146
diff --git a/docs/changelog/763.bugfix.rst b/docs/changelog/763.bugfix.rst new file mode 100644 index 00000000..88cc3e0b --- /dev/null +++ b/docs/changelog/763.bugfix.rst @@ -0,0 +1,1 @@ +Support ``#`` character in path for the tox project - by :user:`gaborbernat`. diff --git a/src/tox/config/loader/ini/__init__.py b/src/tox/config/loader/ini/__init__.py index 5822555e..55d67be3 100644 --- a/src/tox/config/loader/ini/__init__.py +++ b/src/tox/config/loader/ini/__init__.py @@ -29,8 +29,10 @@ class IniLoader(StrConvert, Loader[str]): super().__init__(overrides) def load_raw(self, key: str, conf: Optional["Config"], env_name: Optional[str]) -> str: - value = self._section[key] + return self.process_raw(conf, env_name, self._section[key]) + @staticmethod + def process_raw(conf: Optional["Config"], env_name: Optional[str], value: str) -> str: # strip comments elements: List[str] = [] for line in value.split("\n"): @@ -38,7 +40,6 @@ class IniLoader(StrConvert, Loader[str]): part = _COMMENTS.sub("", line) elements.append(part.replace("\\#", "#")) strip_comments = "\n".join(elements) - if conf is None: # conf is None when we're loading the global tox configuration file for the CLI factor_filtered = strip_comments # we don't support factor and replace functionality there else: diff --git a/src/tox/config/loader/ini/replace.py b/src/tox/config/loader/ini/replace.py index 3e9d193f..06facf61 100644 --- a/src/tox/config/loader/ini/replace.py +++ b/src/tox/config/loader/ini/replace.py @@ -120,9 +120,10 @@ def replace_reference( for src in _config_value_sources(settings["env"], settings["section"], current_env, conf, loader): try: if isinstance(src, SectionProxy): - return src[key] + return loader.process_raw(conf, current_env, src[key]) value = src.load(key, chain) as_str, _ = stringify(value) + as_str = as_str.replace("#", r"\#") # escape comment characters as these will be stripped return as_str except KeyError as exc: # if fails, keep trying maybe another source can satisfy exception = exc diff --git a/src/tox/config/loader/str_convert.py b/src/tox/config/loader/str_convert.py index 01a114e6..ba373af8 100644 --- a/src/tox/config/loader/str_convert.py +++ b/src/tox/config/loader/str_convert.py @@ -47,8 +47,10 @@ class StrConvert(Convert[str]): @staticmethod def to_command(value: str) -> Command: is_win = sys.platform == "win32" + value = value.replace(r"\#", "#") splitter = shlex.shlex(value, posix=not is_win) splitter.whitespace_split = True + splitter.commenters = "" # comments handled earlier, and the shlex does not know escaped comment characters args: List[str] = [] pos = 0 try: diff --git a/src/tox/config/set_env.py b/src/tox/config/set_env.py index ce836516..acfd52fa 100644 --- a/src/tox/config/set_env.py +++ b/src/tox/config/set_env.py @@ -41,6 +41,7 @@ class SetEnv: return self._materialized[item] raw = self._raw[item] result = self.replacer(raw, chain) # apply any replace options + result = result.replace(r"\#", "#") # unroll escaped comment with replacement self._materialized[item] = result self._raw.pop(item, None) # if the replace requires the env we may be called again, so allow pop to fail return result diff --git a/whitelist.txt b/whitelist.txt index c8163050..c5fd2f25 100644 --- a/whitelist.txt +++ b/whitelist.txt @@ -27,6 +27,7 @@ chdir cmd codec colorama +commenters conf configs conftest
tox-dev/tox
174fd08e7db7076e262b96e4a76db49b40fbc620
diff --git a/src/tox/pytest.py b/src/tox/pytest.py index 73123355..1516297c 100644 --- a/src/tox/pytest.py +++ b/src/tox/pytest.py @@ -370,7 +370,9 @@ class ToxRunOutcome: class ToxProjectCreator(Protocol): - def __call__(self, files: Dict[str, Any], base: Optional[Path] = None) -> ToxProject: # noqa: U100 + def __call__( + self, files: Dict[str, Any], base: Optional[Path] = None, prj_path: Optional[Path] = None # noqa: U100 + ) -> ToxProject: ... @@ -378,9 +380,9 @@ class ToxProjectCreator(Protocol): def init_fixture( tmp_path: Path, capfd: CaptureFixture, monkeypatch: MonkeyPatch, mocker: MockerFixture ) -> ToxProjectCreator: - def _init(files: Dict[str, Any], base: Optional[Path] = None) -> ToxProject: + def _init(files: Dict[str, Any], base: Optional[Path] = None, prj_path: Optional[Path] = None) -> ToxProject: """create tox projects""" - return ToxProject(files, base, tmp_path / "p", capfd, monkeypatch, mocker) + return ToxProject(files, base, prj_path or tmp_path / "p", capfd, monkeypatch, mocker) return _init # noqa diff --git a/tests/session/cmd/test_show_config.py b/tests/session/cmd/test_show_config.py index bfeec554..366766ff 100644 --- a/tests/session/cmd/test_show_config.py +++ b/tests/session/cmd/test_show_config.py @@ -1,6 +1,8 @@ import platform import sys from configparser import ConfigParser +from pathlib import Path +from textwrap import dedent from typing import Callable, Tuple import pytest @@ -155,3 +157,27 @@ def test_show_config_description_normalize(tox_project: ToxProjectCreator) -> No outcome = tox_project({"tox.ini": tox_ini}).run("c", "-e", "py", "-k", "description") outcome.assert_success() assert outcome.out == "[testenv:py]\ndescription = A magical pipe of this\n" + + +def test_show_config_ini_comment_path(tox_project: ToxProjectCreator, tmp_path: Path) -> None: + prj_path = tmp_path / "#magic" + prj_path.mkdir() + ini = """ + [testenv] + package = skip + set_env = + A=1 # comment + # more comment + commands = {envpython} -c 'import os; print(os.linesep.join(f"{k}={v}" for k, v in os.environ.items()))' + [testenv:py] + set_env = + {[testenv]set_env} + B = {tox_root} # just some comment + """ + project = tox_project({"tox.ini": dedent(ini)}, prj_path=prj_path) + result = project.run("r", "-e", "py") + result.assert_success() + a_line = next(i for i in result.out.splitlines() if i.startswith("A=")) # pragma: no branch # not found raises + assert a_line == "A=1" + b_line = next(i for i in result.out.splitlines() if i.startswith("B=")) # pragma: no branch # not found raises + assert b_line == f"B={prj_path}"
tox fails when running in a path containing a hash

On Archlinux with 2.9.1, when trying to run it in a folder called `test#foo`, I get:

```
$ tox -e flake8
flake8 create: /home/florian/proj/qutebrowser/test#foo/.tox/flake8
flake8 installdeps: -r/home/florian/proj/qutebrowser/test#foo/requirements.txt, -r/home/florian/proj/qutebrowser/test#foo/misc/requirements/requirements-flake8.txt
flake8 installed: attrs==17.3.0,colorama==0.3.9,cssutils==1.0.2,flake8==3.5.0,flake8-bugbear==17.12.0,flake8-builtins==1.0.post0,flake8-comprehensions==1.4.1,flake8-copyright==0.2.0,flake8-debugger==3.0.0,flake8-deprecated==1.3,flake8-docstrings==1.1.0,flake8-future-import==0.4.3,flake8-mock==0.3,flake8-per-file-ignores==0.4,flake8-polyfill==1.0.1,flake8-string-format==0.2.3,flake8-tidy-imports==1.1.0,flake8-tuple==0.2.13,Jinja2==2.10,MarkupSafe==1.0,mccabe==0.6.1,pep8-naming==0.4.1,pycodestyle==2.3.1,pydocstyle==2.1.1,pyflakes==1.6.0,Pygments==2.2.0,pyPEG2==2.15.2,PyYAML==3.12,six==1.11.0,snowballstemmer==1.2.1
flake8 runtests: PYTHONHASHSEED='2112495524'
flake8 runtests: commands[0] | /home/florian/proj/qutebrowser/test
ERROR: invocation failed (errno 2), args: ['/home/florian/proj/qutebrowser/test'], cwd: /home/florian/proj/qutebrowser/test#foo
Traceback (most recent call last):
  File "/bin/tox", line 11, in <module>
    load_entry_point('tox==2.9.1', 'console_scripts', 'tox')()
  File "/usr/lib/python3.6/site-packages/tox/session.py", line 40, in main
    retcode = Session(config).runcommand()
  File "/usr/lib/python3.6/site-packages/tox/session.py", line 392, in runcommand
    return self.subcommand_test()
  File "/usr/lib/python3.6/site-packages/tox/session.py", line 583, in subcommand_test
    self.runtestenv(venv)
  File "/usr/lib/python3.6/site-packages/tox/session.py", line 592, in runtestenv
    self.hook.tox_runtest(venv=venv, redirect=redirect)
  File "/usr/lib/python3.6/site-packages/pluggy/__init__.py", line 617, in __call__
    return self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)
  File "/usr/lib/python3.6/site-packages/pluggy/__init__.py", line 222, in _hookexec
    return self._inner_hookexec(hook, methods, kwargs)
  File "/usr/lib/python3.6/site-packages/pluggy/__init__.py", line 216, in <lambda>
    firstresult=hook.spec_opts.get('firstresult'),
  File "/usr/lib/python3.6/site-packages/pluggy/callers.py", line 201, in _multicall
    return outcome.get_result()
  File "/usr/lib/python3.6/site-packages/pluggy/callers.py", line 76, in get_result
    raise ex[1].with_traceback(ex[2])
  File "/usr/lib/python3.6/site-packages/pluggy/callers.py", line 180, in _multicall
    res = hook_impl.function(*args)
  File "/usr/lib/python3.6/site-packages/tox/venv.py", line 464, in tox_runtest
    venv.test(redirect=redirect)
  File "/usr/lib/python3.6/site-packages/tox/venv.py", line 384, in test
    ignore_ret=ignore_ret, testcommand=True)
  File "/usr/lib/python3.6/site-packages/tox/venv.py", line 414, in _pcall
    redirect=redirect, ignore_ret=ignore_ret)
  File "/usr/lib/python3.6/site-packages/tox/session.py", line 140, in popen
    stdout=stdout, stderr=subprocess.STDOUT)
  File "/usr/lib/python3.6/site-packages/tox/session.py", line 228, in _popen
    stdout=stdout, stderr=stderr, env=env)
  File "/usr/lib/python3.6/subprocess.py", line 709, in __init__
    restore_signals, start_new_session)
  File "/usr/lib/python3.6/subprocess.py", line 1344, in _execute_child
    raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: '/home/florian/proj/qutebrowser/test': '/home/florian/proj/qutebrowser/test'
```

Nothing too special in tox.ini:

```ini
[testenv:flake8]
basepython = {env:PYTHON:python3}
passenv =
deps =
    -r{toxinidir}/requirements.txt
    -r{toxinidir}/misc/requirements/requirements-flake8.txt
commands = {envpython} -m flake8 {posargs:qutebrowser tests scripts}
```

I'm guessing `{envpython}` gets replaced by the Python path (which contains a `#`) and only after that, comments are stripped out?
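That guess matches the fix that eventually landed (see the patch above): substituted values escape `#` so comment stripping leaves them intact, and the command splitter unescapes them with shlex commenting disabled. A small sketch of that round-trip (the paths are made up for illustration):

```python
import shlex

# 1) replacement step: escape '#' in the substituted value
line = "/home/user/test#foo/.tox/py/bin/python -m flake8".replace("#", r"\#")
# 2) ini comment stripping runs here and ignores the escaped '\#'
# 3) command splitting: unescape and split without treating '#' as a comment
splitter = shlex.shlex(line.replace(r"\#", "#"), posix=True)
splitter.whitespace_split = True
splitter.commenters = ""  # '#' inside an argument must not start a comment
print(list(splitter))
# ['/home/user/test#foo/.tox/py/bin/python', '-m', 'flake8']
```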
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/session/cmd/test_show_config.py::test_show_config_ini_comment_path" ]
[ "tests/session/cmd/test_show_config.py::test_show_config_default_run_env", "tests/session/cmd/test_show_config.py::test_show_config_commands", "tests/session/cmd/test_show_config.py::test_show_config_filter_keys", "tests/session/cmd/test_show_config.py::test_show_config_unused", "tests/session/cmd/test_show_config.py::test_show_config_exception", "tests/session/cmd/test_show_config.py::test_pass_env_config_default[True]", "tests/session/cmd/test_show_config.py::test_pass_env_config_default[False]", "tests/session/cmd/test_show_config.py::test_show_config_pkg_env_skip", "tests/session/cmd/test_show_config.py::test_show_config_select_only", "tests/session/cmd/test_show_config.py::test_show_config_alias", "tests/session/cmd/test_show_config.py::test_show_config_description_normalize" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-08-08T10:14:53Z"
mit
tox-dev__tox-2212
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d6dd3e68..8ce71718 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -12,7 +12,7 @@ repos: - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/asottile/pyupgrade - rev: v2.25.0 + rev: v2.25.1 hooks: - id: pyupgrade args: ["--py36-plus"] diff --git a/docs/changelog/2211.bugfix.rst b/docs/changelog/2211.bugfix.rst new file mode 100644 index 00000000..7584fd21 --- /dev/null +++ b/docs/changelog/2211.bugfix.rst @@ -0,0 +1,1 @@ +Fix old-new value on recreate cache miss-match are swapped -- by :user:`gaborbernat`. diff --git a/src/tox/tox_env/python/api.py b/src/tox/tox_env/python/api.py index 2bec85d2..222e14ec 100644 --- a/src/tox/tox_env/python/api.py +++ b/src/tox/tox_env/python/api.py @@ -173,7 +173,7 @@ class Python(ToxEnv, ABC): removed = [f"{k}={v!r}" for k, v in old.items() if k not in conf] if removed: result.append(f"removed {' | '.join(removed)}") - changed = [f"{k}={v!r}->{old[k]!r}" for k, v in conf.items() if k in old and v != old[k]] + changed = [f"{k}={old[k]!r}->{v!r}" for k, v in conf.items() if k in old and v != old[k]] if changed: result.append(f"changed {' | '.join(changed)}") return f'python {", ".join(result)}'
tox-dev/tox
957a280af356575b00ac6bee34fb02919e95766e
diff --git a/src/tox/pytest.py b/src/tox/pytest.py index 265292be..b7d7be7d 100644 --- a/src/tox/pytest.py +++ b/src/tox/pytest.py @@ -398,8 +398,11 @@ def empty_project(tox_project: ToxProjectCreator, monkeypatch: MonkeyPatch) -> T return project +_RUN_INTEGRATION_TEST_FLAG = "--run-integration" + + def pytest_addoption(parser: Parser) -> None: - parser.addoption("--run-integration", action="store_true", help="run the integration tests") + parser.addoption(_RUN_INTEGRATION_TEST_FLAG, action="store_true", help="run the integration tests") def pytest_configure(config: PyTestConfig) -> None: @@ -413,12 +416,12 @@ def pytest_collection_modifyitems(config: PyTestConfig, items: List[Function]) - if len(items) == 1: # pragma: no cover # hard to test return - skip_int = pytest.mark.skip(reason="integration tests not run (no --run-int flag)") + skip_int = pytest.mark.skip(reason=f"integration tests not run (no {_RUN_INTEGRATION_TEST_FLAG} flag)") def is_integration(test_item: Function) -> bool: return test_item.get_closest_marker("integration") is not None - integration_enabled = config.getoption("--run-integration") + integration_enabled = config.getoption(_RUN_INTEGRATION_TEST_FLAG) if not integration_enabled: # pragma: no cover # hard to test for item in items: if is_integration(item): diff --git a/tests/tox_env/python/test_python_api.py b/tests/tox_env/python/test_python_api.py index aa588d44..2204a22a 100644 --- a/tests/tox_env/python/test_python_api.py +++ b/tests/tox_env/python/test_python_api.py @@ -60,7 +60,7 @@ def test_build_wheel_in_non_base_pkg_env( def test_diff_msg_added_removed_changed() -> None: before = {"A": "1", "F": "8", "C": "3", "D": "4", "E": "6"} after = {"G": "9", "B": "2", "C": "3", "D": "5", "E": "7"} - expected = "python added A='1' | F='8', removed G='9' | B='2', changed D='4'->'5' | E='6'->'7'" + expected = "python added A='1' | F='8', removed G='9' | B='2', changed D='5'->'4' | E='7'->'6'" assert Python._diff_msg(before, after) == expected diff --git a/tests/tox_env/python/virtual_env/test_virtualenv_api.py b/tests/tox_env/python/virtual_env/test_virtualenv_api.py index 5398e7f2..490aa295 100644 --- a/tests/tox_env/python/virtual_env/test_virtualenv_api.py +++ b/tests/tox_env/python/virtual_env/test_virtualenv_api.py @@ -119,7 +119,7 @@ def test_recreate_when_virtualenv_changes(tox_project: ToxProjectCreator, mocker mocker.patch.object(api, "virtualenv_version", "1.0") result = proj.run("r") - assert f"recreate env because python changed virtualenv version='1.0'->'{virtualenv_version}'" in result.out + assert f"recreate env because python changed virtualenv version='{virtualenv_version}'->'1.0'" in result.out assert "remove tox env folder" in result.out
tox4 python version change arrow should be inverted

```
type: recreate env because python changed version_info=[3, 9, 7, 'final', 0]->[3, 10, 0, 'candidate', 2] |
```
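The fix in the patch above is a single operand swap in the f-string. A runnable reproduction of the message construction:

```python
# Reproduces the recreate-message construction before and after the fix.
old = {"version_info": [3, 9, 7, "final", 0]}        # cached values
conf = {"version_info": [3, 10, 0, "candidate", 2]}  # current values

buggy = [f"{k}={v!r}->{old[k]!r}" for k, v in conf.items() if k in old and v != old[k]]
fixed = [f"{k}={old[k]!r}->{v!r}" for k, v in conf.items() if k in old and v != old[k]]

print(buggy[0])  # new->old, i.e. the inverted arrow reported in the issue
print(fixed[0])  # version_info=[3, 9, 7, 'final', 0]->[3, 10, 0, 'candidate', 2]
```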
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/tox_env/python/test_python_api.py::test_diff_msg_added_removed_changed", "tests/tox_env/python/virtual_env/test_virtualenv_api.py::test_recreate_when_virtualenv_changes" ]
[ "tests/tox_env/python/test_python_api.py::test_requirements_txt", "tests/tox_env/python/test_python_api.py::test_diff_msg_no_diff", "tests/tox_env/python/test_python_api.py::test_base_python_env_no_conflict[magic-pypy-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_no_conflict[magic-pypy-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_no_conflict[magic-py39-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_no_conflict[magic-py39-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_no_conflict[.pkg-py-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_no_conflict[.pkg-py-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[cpython-pypy-pypy-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[cpython-pypy-pypy-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[pypy-cpython-cpython-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[pypy-cpython-cpython-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[pypy2-pypy3-pypy3-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[pypy2-pypy3-pypy3-False]", "tests/tox_env/python/virtual_env/test_virtualenv_api.py::test_virtualenv_env_ignored_if_set", "tests/tox_env/python/virtual_env/test_virtualenv_api.py::test_virtualenv_env_used_if_not_set", "tests/tox_env/python/virtual_env/test_virtualenv_api.py::test_honor_set_env_for_clear_periodic_update", "tests/tox_env/python/virtual_env/test_virtualenv_api.py::test_pip_pre[True]", "tests/tox_env/python/virtual_env/test_virtualenv_api.py::test_pip_pre[False]", "tests/tox_env/python/virtual_env/test_virtualenv_api.py::test_install_command_no_packages", "tests/tox_env/python/virtual_env/test_virtualenv_api.py::test_list_dependencies_command", "tests/tox_env/python/virtual_env/test_virtualenv_api.py::test_install_pkg[r]", "tests/tox_env/python/virtual_env/test_virtualenv_api.py::test_install_pkg[p]", "tests/tox_env/python/virtual_env/test_virtualenv_api.py::test_install_pkg[le]" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2021-09-11T16:31:51Z"
mit
tox-dev__tox-2306
diff --git a/docs/changelog/2183.feature.rst b/docs/changelog/2183.feature.rst new file mode 100644 index 00000000..9ddfb8cf --- /dev/null +++ b/docs/changelog/2183.feature.rst @@ -0,0 +1,2 @@ +Display a hint for unrecognized argument CLI parse failures to use ``--`` separator to pass arguments to commands +- by :user:`gaborbernat`. diff --git a/docs/changelog/2287.doc.rst b/docs/changelog/2287.doc.rst new file mode 100644 index 00000000..40c2ab68 --- /dev/null +++ b/docs/changelog/2287.doc.rst @@ -0,0 +1,1 @@ +Document :meth:`tox.config.sets.ConfigSet.loaders` - by :user:`gaborbernat`. diff --git a/src/tox/config/cli/parser.py b/src/tox/config/cli/parser.py index a7a4e548..a49ce18b 100644 --- a/src/tox/config/cli/parser.py +++ b/src/tox/config/cli/parser.py @@ -76,6 +76,19 @@ class ArgumentParserWithEnvAndConfig(ArgumentParser): raise TypeError(action) return of_type + def parse_args( # type: ignore # avoid defining all overloads + self, + args: Sequence[str] | None = None, + namespace: Namespace | None = None, + ) -> Namespace: + res, argv = self.parse_known_args(args, namespace) + if argv: + self.error( + f'unrecognized arguments: {" ".join(argv)}\n' + "hint: if you tried to pass arguments to a command use -- to separate them from tox ones", + ) + return res + class HelpFormatter(ArgumentDefaultsHelpFormatter): """ diff --git a/src/tox/config/sets.py b/src/tox/config/sets.py index 46864030..5f0df677 100644 --- a/src/tox/config/sets.py +++ b/src/tox/config/sets.py @@ -24,7 +24,7 @@ class ConfigSet(ABC): self._section = section self._env_name = env_name self._conf = conf - self.loaders: list[Loader[Any]] = [] + self.loaders: list[Loader[Any]] = [] #: active configuration loaders, can alter to change configuration values self._defined: dict[str, ConfigDefinition[Any]] = {} self._keys: dict[str, None] = {} self._alias: dict[str, str] = {}
tox-dev/tox
b8307e79a8e6f6e7f1440c22a63765ecf895f208
diff --git a/tests/config/cli/test_parser.py b/tests/config/cli/test_parser.py index 71d515cf..3b965d0d 100644 --- a/tests/config/cli/test_parser.py +++ b/tests/config/cli/test_parser.py @@ -7,7 +7,7 @@ import pytest from pytest_mock import MockerFixture from tox.config.cli.parser import Parsed, ToxParser -from tox.pytest import MonkeyPatch +from tox.pytest import CaptureFixture, MonkeyPatch def test_parser_const_with_default_none(monkeypatch: MonkeyPatch) -> None: @@ -81,3 +81,11 @@ def test_parse_known_args_not_set(mocker: MockerFixture) -> None: parser = ToxParser.base() _, unknown = parser.parse_known_args(None) assert unknown == ["--help"] + + +def test_parser_hint(capsys: CaptureFixture) -> None: + parser = ToxParser.base() + with pytest.raises(SystemExit): + parser.parse_args("foo") + out, err = capsys.readouterr() + assert err.endswith("hint: if you tried to pass arguments to a command use -- to separate them from tox ones\n")
[tox4] Document loaders on config set

I'm trying to port our tox extension to the new tox 4 and I have one problem described in this thread: https://github.com/fedora-python/tox-current-env/pull/42#pullrequestreview-799430800

The problem basically is that it's not possible to call `get_env` in `tox_add_env_config` because it causes an endless recursion. I need to do it so I can overwrite some settings loaded from config with values from the extension.
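For reference, the usual way around the recursion (and the reason `loaders` is now documented by the patch above) is for a plugin to prepend its own loader rather than read-modify-write via `get_env`. A hedged sketch — the `MemoryLoader` import path and hook signature are tox 4 internals and may shift between versions:

```python
from tox.config.loader.memory import MemoryLoader
from tox.plugin import impl


@impl
def tox_add_env_config(env_conf, state):
    # A loader inserted at the front takes precedence over values read from
    # tox.ini, so the extension's settings win without re-entering the
    # config machinery.
    env_conf.loaders.insert(0, MemoryLoader(recreate=True))
```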
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/config/cli/test_parser.py::test_parser_hint" ]
[ "tests/config/cli/test_parser.py::test_parser_const_with_default_none", "tests/config/cli/test_parser.py::test_parser_color[None-None-None-True]", "tests/config/cli/test_parser.py::test_parser_color[None-None-None-False]", "tests/config/cli/test_parser.py::test_parser_color[None-None-0-True]", "tests/config/cli/test_parser.py::test_parser_color[None-None-0-False]", "tests/config/cli/test_parser.py::test_parser_color[None-None-1-True]", "tests/config/cli/test_parser.py::test_parser_color[None-None-1-False]", "tests/config/cli/test_parser.py::test_parser_color[None-0-None-True]", "tests/config/cli/test_parser.py::test_parser_color[None-0-None-False]", "tests/config/cli/test_parser.py::test_parser_color[None-0-0-True]", "tests/config/cli/test_parser.py::test_parser_color[None-0-0-False]", "tests/config/cli/test_parser.py::test_parser_color[None-0-1-True]", "tests/config/cli/test_parser.py::test_parser_color[None-0-1-False]", "tests/config/cli/test_parser.py::test_parser_color[None-1-None-True]", "tests/config/cli/test_parser.py::test_parser_color[None-1-None-False]", "tests/config/cli/test_parser.py::test_parser_color[None-1-0-True]", "tests/config/cli/test_parser.py::test_parser_color[None-1-0-False]", "tests/config/cli/test_parser.py::test_parser_color[None-1-1-True]", "tests/config/cli/test_parser.py::test_parser_color[None-1-1-False]", "tests/config/cli/test_parser.py::test_parser_color[bad-None-None-True]", "tests/config/cli/test_parser.py::test_parser_color[bad-None-None-False]", "tests/config/cli/test_parser.py::test_parser_color[bad-None-0-True]", "tests/config/cli/test_parser.py::test_parser_color[bad-None-0-False]", "tests/config/cli/test_parser.py::test_parser_color[bad-None-1-True]", "tests/config/cli/test_parser.py::test_parser_color[bad-None-1-False]", "tests/config/cli/test_parser.py::test_parser_color[bad-0-None-True]", "tests/config/cli/test_parser.py::test_parser_color[bad-0-None-False]", "tests/config/cli/test_parser.py::test_parser_color[bad-0-0-True]", "tests/config/cli/test_parser.py::test_parser_color[bad-0-0-False]", "tests/config/cli/test_parser.py::test_parser_color[bad-0-1-True]", "tests/config/cli/test_parser.py::test_parser_color[bad-0-1-False]", "tests/config/cli/test_parser.py::test_parser_color[bad-1-None-True]", "tests/config/cli/test_parser.py::test_parser_color[bad-1-None-False]", "tests/config/cli/test_parser.py::test_parser_color[bad-1-0-True]", "tests/config/cli/test_parser.py::test_parser_color[bad-1-0-False]", "tests/config/cli/test_parser.py::test_parser_color[bad-1-1-True]", "tests/config/cli/test_parser.py::test_parser_color[bad-1-1-False]", "tests/config/cli/test_parser.py::test_parser_color[no-None-None-True]", "tests/config/cli/test_parser.py::test_parser_color[no-None-None-False]", "tests/config/cli/test_parser.py::test_parser_color[no-None-0-True]", "tests/config/cli/test_parser.py::test_parser_color[no-None-0-False]", "tests/config/cli/test_parser.py::test_parser_color[no-None-1-True]", "tests/config/cli/test_parser.py::test_parser_color[no-None-1-False]", "tests/config/cli/test_parser.py::test_parser_color[no-0-None-True]", "tests/config/cli/test_parser.py::test_parser_color[no-0-None-False]", "tests/config/cli/test_parser.py::test_parser_color[no-0-0-True]", "tests/config/cli/test_parser.py::test_parser_color[no-0-0-False]", "tests/config/cli/test_parser.py::test_parser_color[no-0-1-True]", "tests/config/cli/test_parser.py::test_parser_color[no-0-1-False]", "tests/config/cli/test_parser.py::test_parser_color[no-1-None-True]", 
"tests/config/cli/test_parser.py::test_parser_color[no-1-None-False]", "tests/config/cli/test_parser.py::test_parser_color[no-1-0-True]", "tests/config/cli/test_parser.py::test_parser_color[no-1-0-False]", "tests/config/cli/test_parser.py::test_parser_color[no-1-1-True]", "tests/config/cli/test_parser.py::test_parser_color[no-1-1-False]", "tests/config/cli/test_parser.py::test_parser_color[yes-None-None-True]", "tests/config/cli/test_parser.py::test_parser_color[yes-None-None-False]", "tests/config/cli/test_parser.py::test_parser_color[yes-None-0-True]", "tests/config/cli/test_parser.py::test_parser_color[yes-None-0-False]", "tests/config/cli/test_parser.py::test_parser_color[yes-None-1-True]", "tests/config/cli/test_parser.py::test_parser_color[yes-None-1-False]", "tests/config/cli/test_parser.py::test_parser_color[yes-0-None-True]", "tests/config/cli/test_parser.py::test_parser_color[yes-0-None-False]", "tests/config/cli/test_parser.py::test_parser_color[yes-0-0-True]", "tests/config/cli/test_parser.py::test_parser_color[yes-0-0-False]", "tests/config/cli/test_parser.py::test_parser_color[yes-0-1-True]", "tests/config/cli/test_parser.py::test_parser_color[yes-0-1-False]", "tests/config/cli/test_parser.py::test_parser_color[yes-1-None-True]", "tests/config/cli/test_parser.py::test_parser_color[yes-1-None-False]", "tests/config/cli/test_parser.py::test_parser_color[yes-1-0-True]", "tests/config/cli/test_parser.py::test_parser_color[yes-1-0-False]", "tests/config/cli/test_parser.py::test_parser_color[yes-1-1-True]", "tests/config/cli/test_parser.py::test_parser_color[yes-1-1-False]", "tests/config/cli/test_parser.py::test_parser_unsupported_type", "tests/config/cli/test_parser.py::test_sub_sub_command", "tests/config/cli/test_parser.py::test_parse_known_args_not_set" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2022-01-09T10:50:14Z"
mit
tox-dev__tox-2383
diff --git a/CONTRIBUTORS b/CONTRIBUTORS index f3fc3407..15f58597 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -55,6 +55,7 @@ Isaac Pedisich Itxaka Serrano Jake Windle Jannis Leidel +Jason R. Coombs Jesse Schwartzentruber Joachim Brandon LeBlanc Johannes Christ diff --git a/docs/changelog/2382.feature.rst b/docs/changelog/2382.feature.rst new file mode 100644 index 00000000..59a75852 --- /dev/null +++ b/docs/changelog/2382.feature.rst @@ -0,0 +1,1 @@ +On Windows ``PROGRAMFILES``, ``PROGRAMFILES(X86)``, and ``PROGRAMDATA`` environment variables are now passed through, unmasking system values necessary to locate resources such as a C compiler. diff --git a/docs/config.rst b/docs/config.rst index c579733e..2ca82e3a 100644 --- a/docs/config.rst +++ b/docs/config.rst @@ -469,7 +469,8 @@ Complete list of settings that you can put into ``testenv*`` sections: ``REQUESTS_CA_BUNDLE``, ``SSL_CERT_FILE``, ``HTTP_PROXY``, ``HTTPS_PROXY``, ``NO_PROXY`` * Windows: ``SYSTEMDRIVE``, ``SYSTEMROOT``, ``PATHEXT``, ``TEMP``, ``TMP`` - ``NUMBER_OF_PROCESSORS``, ``USERPROFILE``, ``MSYSTEM`` + ``NUMBER_OF_PROCESSORS``, ``USERPROFILE``, ``MSYSTEM``, + ``PROGRAMFILES``, ``PROGRAMFILES(X86)``, ``PROGRAMDATA`` * Others (e.g. UNIX, macOS): ``TMPDIR`` You can override these variables with the ``setenv`` option. diff --git a/src/tox/config/__init__.py b/src/tox/config/__init__.py index b155fd1c..b49c06f6 100644 --- a/src/tox/config/__init__.py +++ b/src/tox/config/__init__.py @@ -807,6 +807,10 @@ def tox_addoption(parser): passenv.add("PROCESSOR_ARCHITECTURE") # platform.machine() passenv.add("USERPROFILE") # needed for `os.path.expanduser()` passenv.add("MSYSTEM") # fixes #429 + # PROGRAM* required for compiler tool discovery #2382 + passenv.add("PROGRAMFILES") + passenv.add("PROGRAMFILES(X86)") + passenv.add("PROGRAMDATA") else: passenv.add("TMPDIR")
tox-dev/tox
eb1bd33d152f11f521805fe17e2240fb768e05d9
diff --git a/tests/unit/config/test_config.py b/tests/unit/config/test_config.py index 6949bf93..3408de02 100644 --- a/tests/unit/config/test_config.py +++ b/tests/unit/config/test_config.py @@ -1518,6 +1518,9 @@ class TestConfigTestEnv: assert "PROCESSOR_ARCHITECTURE" in envconfig.passenv assert "USERPROFILE" in envconfig.passenv assert "MSYSTEM" in envconfig.passenv + assert "PROGRAMFILES" in envconfig.passenv + assert "PROGRAMFILES(X86)" in envconfig.passenv + assert "PROGRAMDATA" in envconfig.passenv else: assert "TMPDIR" in envconfig.passenv if sys.platform != "win32": @@ -1562,6 +1565,9 @@ class TestConfigTestEnv: assert "SYSTEMROOT" in envconfig.passenv assert "TEMP" in envconfig.passenv assert "TMP" in envconfig.passenv + assert "PROGRAMFILES" in envconfig.passenv + assert "PROGRAMFILES(X86)" in envconfig.passenv + assert "PROGRAMDATA" in envconfig.passenv else: assert "TMPDIR" in envconfig.passenv assert "PATH" in envconfig.passenv
Pass through ProgramFiles* and ProgramData by default

I'd like to follow up on #1673. I couldn't comment or even +1 that issue.

I believe the characterization of the issue as setuptools-specific was incorrect. Although the issue does affect Setuptools and not Flit, that's only because Setuptools supports building C extensions on Windows. The issue can't be fixed in Setuptools. By the time tox has masked the ProgramFiles env var, Setuptools has little hope of recovering that setting.

We're not asking tox to patch Setuptools. Instead, we're asking tox to consider honoring the system's intrinsic environment configuration by passing through system-level variables, variables that would be unlikely to be configured by the user and which are necessary for basic operation on the platform (similar to `PATH`, `SYSTEMDRIVE`, and `SYSTEMROOT`). Failing to support this model will instead require every project that builds extension modules on Windows to bypass this setting in their tox config.

I believe this issue was largely missed until recently because most users were still testing on older platforms/compilers that did not rely on vswhere for discovery, but now that Windows 2022 is the default in GitHub, it's affecting a wide array of users.

I contend:

- this exemption is required in every case it affects
- there is no known case that this masking is currently important
- the number of affected projects is large.

I can put together a repro that doesn't involve Setuptools if that helps persuade.
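A compiler-free repro is indeed straightforward: before the patch above, running something like the following from a testenv command on Windows shows the variables masked (a hypothetical check script, not part of tox):

```python
# Print whether the Windows resource-location variables reach the testenv.
import os

for var in ("PROGRAMFILES", "PROGRAMFILES(X86)", "PROGRAMDATA"):
    print(f"{var}={os.environ.get(var, '<masked by tox>')}")
```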
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/unit/config/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[win32]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[win32]" ]
[ "tests/unit/config/test_config.py::TestIniParserPrefix::test_other_section_substitution", "tests/unit/config/test_config.py::TestIniParserPrefix::test_fallback_sections", "tests/unit/config/test_config.py::TestIniParserPrefix::test_basic_section_access", "tests/unit/config/test_config.py::TestIniParserPrefix::test_value_matches_prefixed_section_substitution", "tests/unit/config/test_config.py::TestIniParserPrefix::test_value_doesn_match_prefixed_section_substitution", "tests/unit/config/test_config.py::TestGlobalOptions::test_quiet[args2-2]", "tests/unit/config/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_no_arg", "tests/unit/config/test_config.py::TestGlobalOptions::test_sdist_specification", "tests/unit/config/test_config.py::TestGlobalOptions::test_env_selection_with_section_name", "tests/unit/config/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_true", "tests/unit/config/test_config.py::TestGlobalOptions::test_verbosity[args0-0]", "tests/unit/config/test_config.py::TestGlobalOptions::test_substitution_jenkins_default", "tests/unit/config/test_config.py::TestGlobalOptions::test_quiet[args4-3]", "tests/unit/config/test_config.py::TestGlobalOptions::test_substitution_jenkins_context", "tests/unit/config/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_not_specified", "tests/unit/config/test_config.py::TestGlobalOptions::test_correct_basepython_chosen_from_default_factors", "tests/unit/config/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_overrides_true", "tests/unit/config/test_config.py::TestGlobalOptions::test_quiet[args0-0]", "tests/unit/config/test_config.py::TestGlobalOptions::test_defaultenv_partial_override", "tests/unit/config/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_false", "tests/unit/config/test_config.py::TestGlobalOptions::test_quiet[args3-2]", "tests/unit/config/test_config.py::TestGlobalOptions::test_envlist_expansion", "tests/unit/config/test_config.py::TestGlobalOptions::test_defaultenv_commandline", "tests/unit/config/test_config.py::TestGlobalOptions::test_envlist_cross_product", "tests/unit/config/test_config.py::TestGlobalOptions::test_envlist_multiline", "tests/unit/config/test_config.py::TestGlobalOptions::test_py_venv", "tests/unit/config/test_config.py::TestGlobalOptions::test_verbosity[args2-2]", "tests/unit/config/test_config.py::TestGlobalOptions::test_quiet[args1-1]", "tests/unit/config/test_config.py::TestGlobalOptions::test_verbosity[args4-3]", "tests/unit/config/test_config.py::TestGlobalOptions::test_verbosity[args1-1]", "tests/unit/config/test_config.py::TestGlobalOptions::test_substitution_jenkins_global", "tests/unit/config/test_config.py::TestGlobalOptions::test_no_implicit_venv_from_cli_with_envlist", "tests/unit/config/test_config.py::TestGlobalOptions::test_env_selection_expanded_envlist", "tests/unit/config/test_config.py::TestGlobalOptions::test_notest", "tests/unit/config/test_config.py::TestGlobalOptions::test_verbosity[args3-2]", "tests/unit/config/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_overrides_false", "tests/unit/config/test_config.py::TestParseconfig::test_search_parents", "tests/unit/config/test_config.py::TestParseconfig::test_explicit_config_path", "tests/unit/config/test_config.py::TestParseconfig::test_workdir_gets_resolved", "tests/unit/config/test_config.py::TestGetcontextname::test_blank", "tests/unit/config/test_config.py::TestGetcontextname::test_jenkins", 
"tests/unit/config/test_config.py::TestGetcontextname::test_hudson_legacy", "tests/unit/config/test_config.py::TestIndexServer::test_multiple_homedir_relative_local_indexservers", "tests/unit/config/test_config.py::TestIndexServer::test_parse_indexserver", "tests/unit/config/test_config.py::TestIndexServer::test_indexserver", "tests/unit/config/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[win]", "tests/unit/config/test_config.py::TestConfigPlatform::test_platform_install_command", "tests/unit/config/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[lin]", "tests/unit/config/test_config.py::TestConfigPlatform::test_config_parse_platform_rex", "tests/unit/config/test_config.py::TestConfigPlatform::test_config_parse_platform", "tests/unit/config/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[osx]", "tests/unit/config/test_config.py::test_get_homedir", "tests/unit/config/test_config.py::test_env_spec[-e", "tests/unit/config/test_config.py::test_config_setup_cfg_no_tox_section", "tests/unit/config/test_config.py::test_isolated_build_ignores[deps-crazy-default0]", "tests/unit/config/test_config.py::test_interactive", "tests/unit/config/test_config.py::test_config_current_py", "tests/unit/config/test_config.py::test_config_bad_config_type_specified", "tests/unit/config/test_config.py::test_config_via_pyproject_legacy", "tests/unit/config/test_config.py::test_overwrite_skip_install_override", "tests/unit/config/test_config.py::test_config_bad_pyproject_specified", "tests/unit/config/test_config.py::test_posargs_relative_changedir", "tests/unit/config/test_config.py::test_config_file_not_required_with_devenv", "tests/unit/config/test_config.py::test_interactive_na", "tests/unit/config/test_config.py::test_provision_tox_env_cannot_be_in_envlist", "tests/unit/config/test_config.py::test_isolated_build_env_cannot_be_in_envlist", "tests/unit/config/test_config.py::test_config_no_version_data_in__name", "tests/unit/config/test_config.py::test_isolated_build_overrides", "tests/unit/config/test_config.py::test_isolated_build_ignores[sitepackages-True-False]", "tests/unit/config/test_config.py::test_interactive_available", "tests/unit/config/test_config.py::TestCommandParser::test_command_parser_for_multiple_words", "tests/unit/config/test_config.py::TestCommandParser::test_command_with_split_line_in_subst_arguments", "tests/unit/config/test_config.py::TestCommandParser::test_command_parser_for_word", "tests/unit/config/test_config.py::TestCommandParser::test_command_parsing_for_issue_10", "tests/unit/config/test_config.py::TestCommandParser::test_command_parser_for_posargs", "tests/unit/config/test_config.py::TestCommandParser::test_commands_with_backslash", "tests/unit/config/test_config.py::TestCommandParser::test_command_with_runs_of_whitespace", "tests/unit/config/test_config.py::TestCommandParser::test_command_parser_for_substitution_with_spaces", "tests/unit/config/test_config.py::TestCommandParser::test_command_parser_with_complex_word_set", "tests/unit/config/test_config.py::TestCmdInvocation::test_version_no_plugins", "tests/unit/config/test_config.py::TestCmdInvocation::test_version_with_normal_plugin", "tests/unit/config/test_config.py::TestCmdInvocation::test_no_tox_ini", "tests/unit/config/test_config.py::TestCmdInvocation::test_version_with_fileless_module", "tests/unit/config/test_config.py::TestCmdInvocation::test_version_simple", "tests/unit/config/test_config.py::TestCmdInvocation::test_help", 
"tests/unit/config/test_config.py::TestConfigPackage::test_defaults", "tests/unit/config/test_config.py::TestConfigPackage::test_defaults_changed_dir", "tests/unit/config/test_config.py::TestConfigPackage::test_defaults_distshare", "tests/unit/config/test_config.py::TestConfigPackage::test_project_paths", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_regression_issue595", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_missing_substitution_other_section", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_missing_substitution_simple", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_multiline", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_section_and_posargs_substitution", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_missing_substitution_inherit", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_recursion_error_unnecessary", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_global", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_posargs", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_missing_substitution", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_posargs_with_spaced_colon", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_default_escape", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_missing_substitution_multi_env", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_posargs", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_posargs_with_colon", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_missing_substitution_setenv", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_posargs_with_colon", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_missing_substitution_complex", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_recursion_error_other_section", "tests/unit/config/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_recursion_error_same_section", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_windows_escaping", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_quoted_posargs", "tests/unit/config/test_config.py::TestIniParser::test_substitution_empty", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_quoting_in_command", "tests/unit/config/test_config.py::TestIniParser::test_getpath", "tests/unit/config/test_config.py::TestIniParser::test_argvlist", "tests/unit/config/test_config.py::TestIniParser::test_positional_arguments_are_only_replaced_when_standing_alone", "tests/unit/config/test_config.py::TestIniParser::test_getdict", "tests/unit/config/test_config.py::TestIniParser::test_missing_env_sub_populates_missing_subs", 
"tests/unit/config/test_config.py::TestIniParser::test_missing_env_sub_raises_config_error_in_non_testenv", "tests/unit/config/test_config.py::TestIniParser::test_substitution_with_multiple_words", "tests/unit/config/test_config.py::TestIniParser::test_expand_section_name", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_multiline", "tests/unit/config/test_config.py::TestIniParser::test_getstring_fallback_sections", "tests/unit/config/test_config.py::TestIniParser::test_normal_env_sub_works", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_posargs_with_quotes", "tests/unit/config/test_config.py::TestIniParser::test_getstring_other_section_substitution", "tests/unit/config/test_config.py::TestIniParser::test_getbool", "tests/unit/config/test_config.py::TestIniParser::test_substitution_colon_prefix", "tests/unit/config/test_config.py::TestIniParser::test_value_matches_section_substitution", "tests/unit/config/test_config.py::TestIniParser::test_getstring_single", "tests/unit/config/test_config.py::TestIniParser::test_getargv", "tests/unit/config/test_config.py::TestIniParser::test_getlist", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_command_contains_hash", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_comment_after_command", "tests/unit/config/test_config.py::TestIniParser::test_getstring_environment_substitution_with_default", "tests/unit/config/test_config.py::TestIniParser::test_argvlist_positional_substitution", "tests/unit/config/test_config.py::TestIniParser::test_getstring_substitution", "tests/unit/config/test_config.py::TestIniParser::test_missing_substitution", "tests/unit/config/test_config.py::TestIniParser::test_posargs_are_added_escaped_issue310", "tests/unit/config/test_config.py::TestIniParser::test_value_doesn_match_section_substitution", "tests/unit/config/test_config.py::TestParseEnv::test_parse_recreate", "tests/unit/config/test_config.py::TestConfigTestEnv::test_do_not_substitute_more_than_needed", "tests/unit/config/test_config.py::TestConfigTestEnv::test_factors_support_curly_braces", "tests/unit/config/test_config.py::TestConfigTestEnv::test_passenv_from_global_env", "tests/unit/config/test_config.py::TestConfigTestEnv::test_rewrite_posargs", "tests/unit/config/test_config.py::TestConfigTestEnv::test_changedir_override", "tests/unit/config/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[linux2]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_ignore_outcome", "tests/unit/config/test_config.py::TestConfigTestEnv::test_changedir", "tests/unit/config/test_config.py::TestConfigTestEnv::test_default_factors", "tests/unit/config/test_config.py::TestConfigTestEnv::test_ignore_errors", "tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_double", "tests/unit/config/test_config.py::TestConfigTestEnv::test_default_factors_conflict_lying_name", "tests/unit/config/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy3]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_factors_groups_touch", "tests/unit/config/test_config.py::TestConfigTestEnv::test_factors", "tests/unit/config/test_config.py::TestConfigTestEnv::test_install_command_substitutions_other_section", "tests/unit/config/test_config.py::TestConfigTestEnv::test_installpkg_tops_develop", "tests/unit/config/test_config.py::TestConfigTestEnv::test_passenv_with_factor", "tests/unit/config/test_config.py::TestConfigTestEnv::test_single_value_from_other_secton", 
"tests/unit/config/test_config.py::TestConfigTestEnv::test_factor_use_not_checked", "tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue515", "tests/unit/config/test_config.py::TestConfigTestEnv::test_period_in_factor", "tests/unit/config/test_config.py::TestConfigTestEnv::test_install_command_substitutions", "tests/unit/config/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist0-deps0]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_pip_pre_cmdline_override", "tests/unit/config/test_config.py::TestConfigTestEnv::test_envbindir_win[/-bin]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_factors_in_boolean", "tests/unit/config/test_config.py::TestConfigTestEnv::test_multilevel_substitution", "tests/unit/config/test_config.py::TestConfigTestEnv::test_install_command_setting", "tests/unit/config/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_section", "tests/unit/config/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[linux2]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_rewrite_simple_posargs", "tests/unit/config/test_config.py::TestConfigTestEnv::test_envbindir_win[\\\\-Scripts]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_nested_env_defaults", "tests/unit/config/test_config.py::TestConfigTestEnv::test_commentchars_issue33", "tests/unit/config/test_config.py::TestConfigTestEnv::test_simple", "tests/unit/config/test_config.py::TestConfigTestEnv::test_envbindir", "tests/unit/config/test_config.py::TestConfigTestEnv::test_defaults", "tests/unit/config/test_config.py::TestConfigTestEnv::test_recursive_substitution_cycle_fails", "tests/unit/config/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_factor_expansion", "tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_positional", "tests/unit/config/test_config.py::TestConfigTestEnv::test_passenv_glob_from_global_env", "tests/unit/config/test_config.py::TestConfigTestEnv::test_default_factors_conflict", "tests/unit/config/test_config.py::TestConfigTestEnv::test_allowlist_externals", "tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue246", "tests/unit/config/test_config.py::TestConfigTestEnv::test_envconfigs_based_on_factors", "tests/unit/config/test_config.py::TestConfigTestEnv::test_specific_command_overrides", "tests/unit/config/test_config.py::TestConfigTestEnv::test_install_command_must_contain_packages", "tests/unit/config/test_config.py::TestConfigTestEnv::test_posargs_backslashed_or_quoted", "tests/unit/config/test_config.py::TestConfigTestEnv::test_regression_test_issue_706[envlist0]", "tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_defaults", "tests/unit/config/test_config.py::TestConfigTestEnv::test_sitepackages_switch", "tests/unit/config/test_config.py::TestConfigTestEnv::test_factors_in_setenv", "tests/unit/config/test_config.py::TestConfigTestEnv::test_factor_ops", "tests/unit/config/test_config.py::TestConfigTestEnv::test_no_spinner", "tests/unit/config/test_config.py::TestConfigTestEnv::test_pip_pre", "tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_noargs_issue240", "tests/unit/config/test_config.py::TestConfigTestEnv::test_curly_braces_in_setenv", "tests/unit/config/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist1-deps1]", 
"tests/unit/config/test_config.py::TestConfigTestEnv::test_substitution_error", "tests/unit/config/test_config.py::TestConfigTestEnv::test_envbindir_jython[jython]", "tests/unit/config/test_config.py::TestVenvConfig::test_process_deps", "tests/unit/config/test_config.py::TestVenvConfig::test_envdir_set_manually_setup_cfg", "tests/unit/config/test_config.py::TestVenvConfig::test_force_dep_version", "tests/unit/config/test_config.py::TestVenvConfig::test_config_parsing_multienv", "tests/unit/config/test_config.py::TestVenvConfig::test_force_dep_with_url", "tests/unit/config/test_config.py::TestVenvConfig::test_envdir_set_manually", "tests/unit/config/test_config.py::TestVenvConfig::test_suicide_interrupt_terminate_timeout_set_manually", "tests/unit/config/test_config.py::TestVenvConfig::test_config_parsing_minimal", "tests/unit/config/test_config.py::TestVenvConfig::test_envdir_set_manually_with_substitutions", "tests/unit/config/test_config.py::TestVenvConfig::test_is_same_dep", "tests/unit/config/test_config.py::TestHashseedOption::test_passing_empty_string", "tests/unit/config/test_config.py::TestHashseedOption::test_setenv_in_one_testenv", "tests/unit/config/test_config.py::TestHashseedOption::test_passing_string", "tests/unit/config/test_config.py::TestHashseedOption::test_default", "tests/unit/config/test_config.py::TestHashseedOption::test_noset_with_setenv", "tests/unit/config/test_config.py::TestHashseedOption::test_setenv", "tests/unit/config/test_config.py::TestHashseedOption::test_passing_no_argument", "tests/unit/config/test_config.py::TestHashseedOption::test_passing_integer", "tests/unit/config/test_config.py::TestHashseedOption::test_one_random_hashseed", "tests/unit/config/test_config.py::TestHashseedOption::test_noset", "tests/unit/config/test_config.py::TestSetenv::test_setenv_uses_other_setenv", "tests/unit/config/test_config.py::TestSetenv::test_setenv_env_file[\\n-False]", "tests/unit/config/test_config.py::TestSetenv::test_setenv_recursive_direct_with_default", "tests/unit/config/test_config.py::TestSetenv::test_setenv_env_file[#MAGIC", "tests/unit/config/test_config.py::TestSetenv::test_setenv_env_file[\\nMAGIC", "tests/unit/config/test_config.py::TestSetenv::test_setenv_cross_section_subst_issue294", "tests/unit/config/test_config.py::TestSetenv::test_setenv_cross_section_mixed", "tests/unit/config/test_config.py::TestSetenv::test_setenv_env_file[None-False]", "tests/unit/config/test_config.py::TestSetenv::test_setenv_recursive_direct_with_default_nested", "tests/unit/config/test_config.py::TestSetenv::test_setenv_with_envdir_and_basepython", "tests/unit/config/test_config.py::TestSetenv::test_setenv_uses_os_environ", "tests/unit/config/test_config.py::TestSetenv::test_setenv_recursive_direct_without_default", "tests/unit/config/test_config.py::TestSetenv::test_getdict_lazy_update", "tests/unit/config/test_config.py::TestSetenv::test_setenv_ordering_1", "tests/unit/config/test_config.py::TestSetenv::test_setenv_cross_section_subst_twice", "tests/unit/config/test_config.py::TestSetenv::test_setenv_comment", "tests/unit/config/test_config.py::TestSetenv::test_setenv_default_os_environ", "tests/unit/config/test_config.py::TestSetenv::test_setenv_env_file[MAGIC=yes-True]", "tests/unit/config/test_config.py::TestSetenv::test_setenv_overrides", "tests/unit/config/test_config.py::TestSetenv::test_getdict_lazy", "tests/unit/config/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep[;]", 
"tests/unit/config/test_config.py::TestConfigConstSubstitutions::test_dirsep_replace[\\\\]", "tests/unit/config/test_config.py::TestConfigConstSubstitutions::test_dirsep_replace[\\\\\\\\]", "tests/unit/config/test_config.py::TestConfigConstSubstitutions::test_dirsep_regex", "tests/unit/config/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep[:]", "tests/unit/config/test_config.py::TestConfigConstSubstitutions::test_pathsep_regex" ]
{ "failed_lite_validators": [ "has_issue_reference", "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2022-03-21T15:49:08Z"
mit
tox-dev__tox-2529
diff --git a/CONTRIBUTORS b/CONTRIBUTORS index e0259323..09ae1344 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -33,6 +33,7 @@ Cyril Roelandt Dane Hillard David Staheli David Diaz +Dmitrii Sutiagin a.k.a. f3flight Ederag Eli Collins Eugene Yunak diff --git a/docs/changelog/2528.bugfix.rst b/docs/changelog/2528.bugfix.rst new file mode 100644 index 00000000..c5cecd41 --- /dev/null +++ b/docs/changelog/2528.bugfix.rst @@ -0,0 +1,1 @@ +Add env cleanup to envreport - fix PYTHONPATH leak into "envreport" -- by :user:`f3flight`. diff --git a/src/tox/venv.py b/src/tox/venv.py index 8acb0c77..47f281f6 100644 --- a/src/tox/venv.py +++ b/src/tox/venv.py @@ -840,7 +840,11 @@ def tox_runtest_post(venv): def tox_runenvreport(venv, action): # write out version dependency information args = venv.envconfig.list_dependencies_command - output = venv._pcall(args, cwd=venv.envconfig.config.toxinidir, action=action, returnout=True) + env = venv._get_os_environ() + venv.ensure_pip_os_environ_ok(env) + output = venv._pcall( + args, cwd=venv.envconfig.config.toxinidir, action=action, returnout=True, env=env + ) # the output contains a mime-header, skip it output = output.split("\n\n")[-1] packages = output.strip().split("\n")
tox-dev/tox
0f0c505244f82b85b4c73f5f7ba33bc499b5e163
diff --git a/tests/unit/test_venv.py b/tests/unit/test_venv.py index aa78a48b..3da4e22e 100644 --- a/tests/unit/test_venv.py +++ b/tests/unit/test_venv.py @@ -14,6 +14,7 @@ from tox.venv import ( VirtualEnv, getdigest, prepend_shebang_interpreter, + tox_runenvreport, tox_testenv_create, tox_testenv_install_deps, ) @@ -1233,3 +1234,17 @@ def test_path_change(tmpdir, mocksession, newconfig, monkeypatch): path = x.env["PATH"] assert os.environ["PATH"] in path assert path.endswith(str(venv.envconfig.config.toxinidir) + "/bin") + + +def test_runenvreport_pythonpath_discarded(newmocksession, mocker): + mock_os_environ = mocker.patch("tox.venv.VirtualEnv._get_os_environ") + mocksession = newmocksession([], "") + venv = mocksession.getvenv("python") + mock_os_environ.return_value = dict(PYTHONPATH="/some/path/") + mock_pcall = mocker.patch.object(venv, "_pcall") + tox_runenvreport(venv, None) + try: + env = mock_pcall.mock_calls[0].kwargs["env"] + except TypeError: # older pytest (python 3.7 and below) + env = mock_pcall.mock_calls[0][2]["env"] + assert "PYTHONPATH" not in env
PYTHONPATH leaks into "envreport" Pretty trivial bug. Even though tox discards PYTHONPATH for `pip install`, it does not discard it for `pip freeze`, causing garbage output if PYTHONPATH has any extra packages (i.e. "pip freeze" will see packages from PYTHONPATH, which it should not do). https://github.com/tox-dev/tox/blob/3.27.0/src/tox/venv.py#L843 Fix is trivial - fetch env and apply `ensure_pip_os_environ_ok` the same way it is done for `pip install`, pass env to pcall. I will make a PR.
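The described fix follows the same environment-sanitizing pattern the reporter points to for `pip install`. A minimal, self-contained sketch of that pattern in Python (here `sanitize_env` is a hypothetical stand-in for tox's `ensure_pip_os_environ_ok`, not its actual implementation):

```python
import os
import subprocess


def sanitize_env(env: dict) -> None:
    # hypothetical stand-in for tox's ensure_pip_os_environ_ok: drop
    # variables that would leak host packages into pip's view
    env.pop("PYTHONPATH", None)


def run_env_report(list_command: list) -> str:
    env = os.environ.copy()  # fetch the environment explicitly
    sanitize_env(env)  # scrub it the same way as for `pip install`
    return subprocess.check_output(list_command, env=env, text=True)


print(run_env_report(["python", "-m", "pip", "freeze"]))
```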
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/unit/test_venv.py::test_runenvreport_pythonpath_discarded" ]
[ "tests/unit/test_venv.py::TestVenvTest::test_pythonpath_remove", "tests/unit/test_venv.py::TestVenvTest::test_pythonpath_keep", "tests/unit/test_venv.py::TestVenvTest::test_pythonpath_empty", "tests/unit/test_venv.py::TestVenvTest::test_envbindir_path", "tests/unit/test_venv.py::test_install_error", "tests/unit/test_venv.py::test_create_KeyboardInterrupt[_pcall]", "tests/unit/test_venv.py::test_develop_extras", "tests/unit/test_venv.py::test_install_python3", "tests/unit/test_venv.py::test_test_runtests_action_command_is_in_output", "tests/unit/test_venv.py::test_commandpath_venv_precedence", "tests/unit/test_venv.py::test_create", "tests/unit/test_venv.py::test_create_KeyboardInterrupt[update]", "tests/unit/test_venv.py::test_create_sitepackages", "tests/unit/test_venv.py::test_install_deps_indexserver", "tests/unit/test_venv.py::test_install_sdist_extras", "tests/unit/test_venv.py::test_env_variables_added_to_needs_reinstall", "tests/unit/test_venv.py::test_getdigest", "tests/unit/test_venv.py::test_install_recreate", "tests/unit/test_venv.py::test_install_deps_wildcard", "tests/unit/test_venv.py::test_install_command_allowlisted_exclusive", "tests/unit/test_venv.py::test_getsupportedinterpreter", "tests/unit/test_venv.py::test_test_empty_commands", "tests/unit/test_venv.py::test_install_deps_pre", "tests/unit/test_venv.py::test_install_command_whitelisted", "tests/unit/test_venv.py::test_install_command_not_installed_bash", "tests/unit/test_venv.py::test_install_command_allowlisted", "tests/unit/test_venv.py::test_installpkg_indexserver", "tests/unit/test_venv.py::test_test_hashseed_is_in_output", "tests/unit/test_venv.py::test_install_command_not_installed", "tests/unit/test_venv.py::test_create_download[True]", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_arg", "tests/unit/test_venv.py::test_install_command_verbosity[6-3]", "tests/unit/test_venv.py::test_installpkg_upgrade", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_empty_interpreter_ws", "tests/unit/test_venv.py::test_tox_testenv_create", "tests/unit/test_venv.py::test_run_install_command", "tests/unit/test_venv.py::test_run_install_command_handles_KeyboardInterrupt", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_real", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_non_utf8", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_simple", "tests/unit/test_venv.py::test_create_download[None]", "tests/unit/test_venv.py::test_env_variables_added_to_pcall", "tests/unit/test_venv.py::test_create_download[False]", "tests/unit/test_venv.py::test_install_command_verbosity[3-1]", "tests/unit/test_venv.py::test_install_command_verbosity[4-2]", "tests/unit/test_venv.py::test_ignore_outcome_failing_cmd", "tests/unit/test_venv.py::test_command_relative_issue36", "tests/unit/test_venv.py::test_path_change", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_empty_instance", "tests/unit/test_venv.py::test_install_command_verbosity[2-0]", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_skip_truncated", "tests/unit/test_venv.py::test_install_command_verbosity[0-0]", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_empty_interpreter", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_ws", "tests/unit/test_venv.py::test_install_command_verbosity[1-0]", "tests/unit/test_venv.py::test_tox_testenv_pre_post", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_args", 
"tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_long_example", "tests/unit/test_venv.py::test_run_custom_install_command", "tests/unit/test_venv.py::test_install_command_verbosity[5-3]", "tests/unit/test_venv.py::test_installpkg_no_upgrade", "tests/unit/test_venv.py::test_ignore_outcome_missing_cmd", "tests/unit/test_venv.py::TestCreationConfig::test_basic", "tests/unit/test_venv.py::TestCreationConfig::test_python_recreation", "tests/unit/test_venv.py::TestCreationConfig::test_matchingdependencies_file", "tests/unit/test_venv.py::TestCreationConfig::test_matchingdependencies", "tests/unit/test_venv.py::TestCreationConfig::test_develop_recreation", "tests/unit/test_venv.py::TestCreationConfig::test_matchingdependencies_latest", "tests/unit/test_venv.py::TestCreationConfig::test_dep_recreation" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2022-11-03T22:07:58Z"
mit
tox-dev__tox-2547
diff --git a/docs/changelog/2373.bugfix.rst b/docs/changelog/2373.bugfix.rst new file mode 100644 index 00000000..1fb3436f --- /dev/null +++ b/docs/changelog/2373.bugfix.rst @@ -0,0 +1,1 @@ +Allow ``--hash`` to be specified in requirements.txt files. - by :user:`masenf`. diff --git a/src/tox/tox_env/python/pip/req/args.py b/src/tox/tox_env/python/pip/req/args.py index 19bded34..23f98ec3 100644 --- a/src/tox/tox_env/python/pip/req/args.py +++ b/src/tox/tox_env/python/pip/req/args.py @@ -18,10 +18,10 @@ class _OurArgumentParser(ArgumentParser): raise ValueError(msg) -def build_parser(cli_only: bool) -> ArgumentParser: +def build_parser() -> ArgumentParser: parser = _OurArgumentParser(add_help=False, prog="", allow_abbrev=False) _global_options(parser) - _req_options(parser, cli_only) + _req_options(parser) return parser @@ -47,11 +47,10 @@ def _global_options(parser: ArgumentParser) -> None: ) -def _req_options(parser: ArgumentParser, cli_only: bool) -> None: +def _req_options(parser: ArgumentParser) -> None: parser.add_argument("--install-option", action=AddSortedUniqueAction) parser.add_argument("--global-option", action=AddSortedUniqueAction) - if not cli_only: - parser.add_argument("--hash", action=AddSortedUniqueAction, type=_validate_hash) + parser.add_argument("--hash", action=AddSortedUniqueAction, type=_validate_hash) _HASH = re.compile(r"sha(256:[a-f0-9]{64}|384:[a-f0-9]{96}|512:[a-f0-9]{128})") diff --git a/src/tox/tox_env/python/pip/req/file.py b/src/tox/tox_env/python/pip/req/file.py index 4ad4ae06..df11ebe5 100644 --- a/src/tox/tox_env/python/pip/req/file.py +++ b/src/tox/tox_env/python/pip/req/file.py @@ -156,7 +156,7 @@ class RequirementsFile: @property def _parser(self) -> ArgumentParser: if self._parser_private is None: - self._parser_private = build_parser(False) + self._parser_private = build_parser() return self._parser_private def _ensure_requirements_parsed(self) -> None: diff --git a/src/tox/tox_env/python/pip/req_file.py b/src/tox/tox_env/python/pip/req_file.py index 1f8754a8..91202345 100644 --- a/src/tox/tox_env/python/pip/req_file.py +++ b/src/tox/tox_env/python/pip/req_file.py @@ -1,14 +1,17 @@ from __future__ import annotations import re -from argparse import ArgumentParser +from argparse import Namespace from pathlib import Path -from .req.args import build_parser -from .req.file import ReqFileLines, RequirementsFile +from .req.file import ParsedRequirement, ReqFileLines, RequirementsFile class PythonDeps(RequirementsFile): + # these options are valid in requirements.txt, but not via pip cli and + # thus cannot be used in the testenv `deps` list + _illegal_options = ["hash"] + def __init__(self, raw: str, root: Path): super().__init__(root / "tox.ini", constraint=False) self._raw = self._normalize_raw(raw) @@ -28,12 +31,6 @@ class PythonDeps(RequirementsFile): line = f"{line[0:2]} {line[2:]}" yield at, line - @property - def _parser(self) -> ArgumentParser: - if self._parser_private is None: - self._parser_private = build_parser(cli_only=True) # e.g. 
no --hash for cli only - return self._parser_private - def lines(self) -> list[str]: return self._raw.splitlines() @@ -68,6 +65,20 @@ class PythonDeps(RequirementsFile): raw = f"{adjusted}\n" if raw.endswith("\\\n") else adjusted # preserve trailing newline if input has it return raw + def _parse_requirements(self, opt: Namespace, recurse: bool) -> list[ParsedRequirement]: + # check for any invalid options in the deps list + # (requirements recursively included from other files are not checked) + requirements = super()._parse_requirements(opt, recurse) + for r in requirements: + if r.from_file != str(self.path): + continue + for illegal_option in self._illegal_options: + if r.options.get(illegal_option): + raise ValueError( + f"Cannot use --{illegal_option} in deps list, it must be in requirements file. ({r})", + ) + return requirements + def unroll(self) -> tuple[list[str], list[str]]: if self._unroll is None: opts_dict = vars(self.options)
tox-dev/tox
023a4ed403f42915da52151ce296e0f398b67005
diff --git a/tests/tox_env/python/pip/test_req_file.py b/tests/tox_env/python/pip/test_req_file.py index 41908f96..66a0db9a 100644 --- a/tests/tox_env/python/pip/test_req_file.py +++ b/tests/tox_env/python/pip/test_req_file.py @@ -14,3 +14,31 @@ def test_legacy_requirement_file(tmp_path: Path, legacy_flag: str) -> None: assert python_deps.as_root_args == [legacy_flag, "a.txt"] assert vars(python_deps.options) == {} assert [str(i) for i in python_deps.requirements] == ["b" if legacy_flag == "-r" else "-c b"] + + +def test_deps_with_hash(tmp_path: Path) -> None: + """deps with --hash should raise an exception.""" + python_deps = PythonDeps( + raw="foo==1 --hash sha256:97a702083b0d906517b79672d8501eee470d60ae55df0fa9d4cfba56c7f65a82", + root=tmp_path, + ) + with pytest.raises(ValueError, match="Cannot use --hash in deps list"): + _ = python_deps.requirements + + +def test_deps_with_requirements_with_hash(tmp_path: Path) -> None: + """deps can point to a requirements file that has --hash.""" + exp_hash = "sha256:97a702083b0d906517b79672d8501eee470d60ae55df0fa9d4cfba56c7f65a82" + requirements = tmp_path / "requirements.txt" + requirements.write_text( + f"foo==1 --hash {exp_hash}", + ) + python_deps = PythonDeps( + raw="-r requirements.txt", + root=tmp_path, + ) + assert len(python_deps.requirements) == 1 + parsed_req = python_deps.requirements[0] + assert str(parsed_req.requirement) == "foo==1" + assert parsed_req.options == {"hash": [exp_hash]} + assert parsed_req.from_file == str(requirements)
tox4: fails to process requirement files with --hash There is a regression on tox4 where it fails to parse requirement files that contain hashes. While these are not very popular they are still the recommended for security reasons as they protect against potential hacks on pypi registry. Example of file that causes tox4 to fail, while it works fine with tox3: https://github.com/ansible/ansible-language-server/blob/v0.5.0/docs/requirements.txt ``` During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/Users/ssbarnea/.pyenv/versions/3.10.2/lib/python3.10/site-packages/tox/session/cmd/run/single.py", line 45, in _evaluate tox_env.setup() File "/Users/ssbarnea/.pyenv/versions/3.10.2/lib/python3.10/site-packages/tox/tox_env/api.py", line 226, in setup self._setup_env() File "/Users/ssbarnea/.pyenv/versions/3.10.2/lib/python3.10/site-packages/tox/tox_env/python/runner.py", line 91, in _setup_env self._install_deps() File "/Users/ssbarnea/.pyenv/versions/3.10.2/lib/python3.10/site-packages/tox/tox_env/python/runner.py", line 95, in _install_deps self.installer.install(requirements_file, PythonRun.__name__, "deps") File "/Users/ssbarnea/.pyenv/versions/3.10.2/lib/python3.10/site-packages/tox/tox_env/python/pip/pip_install.py", line 84, in install self._install_requirement_file(arguments, section, of_type) File "/Users/ssbarnea/.pyenv/versions/3.10.2/lib/python3.10/site-packages/tox/tox_env/python/pip/pip_install.py", line 95, in _install_requirement_file raise HandledError(f"{exception} for tox env py within deps") tox.report.HandledError: unrecognized arguments: --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02 for tox env py within deps ``` It should be remarked that these files are produced by pip-compile (pip-tools). Note: I temporary removed the hashes from the lock file but we cannot really ignore this issue.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/tox_env/python/pip/test_req_file.py::test_deps_with_hash", "tests/tox_env/python/pip/test_req_file.py::test_deps_with_requirements_with_hash" ]
[ "tests/tox_env/python/pip/test_req_file.py::test_legacy_requirement_file[-r]", "tests/tox_env/python/pip/test_req_file.py::test_legacy_requirement_file[-c]" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2022-11-25T11:28:53Z"
mit
tox-dev__tox-2643
diff --git a/docs/changelog/2620.bugfix.rst b/docs/changelog/2620.bugfix.rst new file mode 100644 index 00000000..ba4c5bd5 --- /dev/null +++ b/docs/changelog/2620.bugfix.rst @@ -0,0 +1,1 @@ +Ensure :ref:`change_dir` is created if does not exist before executing :ref:`commands` - by :user:`gaborbernat`. diff --git a/src/tox/session/cmd/run/single.py b/src/tox/session/cmd/run/single.py index 9cc3e45e..9ef909d1 100644 --- a/src/tox/session/cmd/run/single.py +++ b/src/tox/session/cmd/run/single.py @@ -71,6 +71,7 @@ def run_commands(tox_env: RunToxEnv, no_test: bool) -> tuple[int, list[Outcome]] from tox.plugin.manager import MANAGER # importing this here to avoid circular import chdir: Path = tox_env.conf["change_dir"] + chdir.mkdir(exist_ok=True, parents=True) ignore_errors: bool = tox_env.conf["ignore_errors"] MANAGER.tox_before_run_commands(tox_env) status_pre, status_main, status_post = -1, -1, -1
tox-dev/tox
267d3275ad929b12e951dc8a6d2b73aa3e61b168
diff --git a/tests/tox_env/test_tox_env_api.py b/tests/tox_env/test_tox_env_api.py index c5542dbc..2c467bb6 100644 --- a/tests/tox_env/test_tox_env_api.py +++ b/tests/tox_env/test_tox_env_api.py @@ -86,3 +86,10 @@ def test_tox_env_pass_env_match_ignore_case(char: str, glob: str) -> None: with patch("os.environ", {"A1": "1", "a2": "2", "A2": "3", "B": "4"}): env = ToxEnv._load_pass_env([f"{char}{glob}"]) assert env == {"A1": "1", "a2": "2", "A2": "3"} + + +def test_change_dir_is_created_if_not_exist(tox_project: ToxProjectCreator) -> None: + prj = tox_project({"tox.ini": "[testenv]\npackage=skip\nchange_dir=a{/}b\ncommands=python --version"}) + result_first = prj.run("r") + result_first.assert_success() + assert (prj.path / "a" / "b").exists()
change_dir fails if directory does not exist I found the issue, I think: `change_dir` says that the directory is created if it doesn't exist, but that does not seem to be the case: > Change to this working directory when executing the test command. If the directory does not exist yet, it will be created (required for Windows to be able to execute any command). With `change_dir = foo` the tests don't run and show the error above. When I run `mkdir foo` myself beforehand, it works. _Originally posted by @maxnoe in https://github.com/tox-dev/tox/issues/2612#issuecomment-1341515799_
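The one-line fix in the patch relies on `pathlib.Path.mkdir` being idempotent when `parents=True` and `exist_ok=True` are both set. A quick illustration of that behaviour:

```python
import tempfile
from pathlib import Path

root = Path(tempfile.mkdtemp())
target = root / "a" / "b"
# parents=True creates intermediate directories; exist_ok=True turns a
# second call into a no-op instead of raising FileExistsError
target.mkdir(parents=True, exist_ok=True)
target.mkdir(parents=True, exist_ok=True)  # safe to repeat
assert target.is_dir()
```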
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/tox_env/test_tox_env_api.py::test_change_dir_is_created_if_not_exist" ]
[ "tests/tox_env/test_tox_env_api.py::test_tox_env_pass_env_literal_exist", "tests/tox_env/test_tox_env_api.py::test_recreate", "tests/tox_env/test_tox_env_api.py::test_tox_env_pass_env_match_ignore_case[A-*]", "tests/tox_env/test_tox_env_api.py::test_tox_env_pass_env_literal_miss", "tests/tox_env/test_tox_env_api.py::test_allow_list_external_fail", "tests/tox_env/test_tox_env_api.py::test_tox_env_pass_env_match_ignore_case[a-*]", "tests/tox_env/test_tox_env_api.py::test_tox_env_pass_env_match_ignore_case[a-?]", "tests/tox_env/test_tox_env_api.py::test_env_log", "tests/tox_env/test_tox_env_api.py::test_tox_env_pass_env_match_ignore_case[A-?]" ]
{ "failed_lite_validators": [ "has_added_files" ], "has_test_patch": true, "is_lite": false }
"2022-12-08T15:53:57Z"
mit
tox-dev__tox-2716
diff --git a/docs/changelog/2640.feature.rst b/docs/changelog/2640.feature.rst new file mode 100644 index 00000000..bbf9b022 --- /dev/null +++ b/docs/changelog/2640.feature.rst @@ -0,0 +1,3 @@ +Add ``py_dot_ver`` and ``py_impl`` constants to environments to show the current Python implementation and dot version +(e.g. ``3.11``) for the current environment. These can be also used as substitutions in ``tox.ini`` - by +:user:`gaborbernat`. diff --git a/docs/faq.rst b/docs/faq.rst index f14d6266..205d8d54 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -79,6 +79,16 @@ tox 4 - removed tox.ini keys | ``distdir`` | Use the ``TOX_PACKAGE`` environment variable.| +--------------------------+----------------------------------------------+ +tox 4 - basepython not resolved ++++++++++++++++++++++++++++++++ +The base python configuration is no longer resolved to ``pythonx.y`` format, instead is kept as ``py39``, and is +the virtualenv project that handles mapping that to a Python interpreter. If you were using this variable we recommend +moving to the newly added ``py_impl`` and ``py_dot_ver`` variables, for example: + +.. code-block:: ini + + deps = -r{py_impl}{py_dot_ver}-req.txt + tox 4 - substitutions removed +++++++++++++++++++++++++++++ - The ``distshare`` substitution has been removed. diff --git a/src/tox/tox_env/api.py b/src/tox/tox_env/api.py index 13a9a4aa..e87710d5 100644 --- a/src/tox/tox_env/api.py +++ b/src/tox/tox_env/api.py @@ -323,7 +323,7 @@ class ToxEnv(ABC): result = self._load_pass_env(pass_env) # load/paths_env might trigger a load of the environment variables, set result here, returns current state - self._env_vars, self._env_vars_pass_env, set_env.changed = result, pass_env, False + self._env_vars, self._env_vars_pass_env, set_env.changed = result, pass_env.copy(), False # set PATH here in case setting and environment variable requires access to the environment variable PATH result["PATH"] = self._make_path() for key in set_env: diff --git a/src/tox/tox_env/python/api.py b/src/tox/tox_env/python/api.py index 09975569..6f25dd6a 100644 --- a/src/tox/tox_env/python/api.py +++ b/src/tox/tox_env/python/api.py @@ -40,6 +40,10 @@ class PythonInfo(NamedTuple): def impl_lower(self) -> str: return self.implementation.lower() + @property + def version_dot(self) -> str: + return f"{self.version_info.major}.{self.version_info.minor}" + class Python(ToxEnv, ABC): def __init__(self, create_args: ToxEnvCreateArgs) -> None: @@ -81,6 +85,14 @@ class Python(ToxEnv, ABC): desc="python executable from within the tox environment", value=lambda: self.env_python(), ) + self.conf.add_constant("py_dot_ver", "<python major>.<python minor>", value=self.py_dot_ver) + self.conf.add_constant("py_impl", "python implementation", value=self.py_impl) + + def py_dot_ver(self) -> str: + return self.base_python.version_dot + + def py_impl(self) -> str: + return self.base_python.impl_lower def _default_pass_env(self) -> list[str]: env = super()._default_pass_env()
tox-dev/tox
b8b0803cb8b295d520e19831ad5b7520fd45755c
diff --git a/tests/session/cmd/test_show_config.py b/tests/session/cmd/test_show_config.py index 95b422ac..cf2e719a 100644 --- a/tests/session/cmd/test_show_config.py +++ b/tests/session/cmd/test_show_config.py @@ -72,6 +72,15 @@ def test_show_config_unused(tox_project: ToxProjectCreator) -> None: assert "\n# !!! unused: magic, magical\n" in outcome.out +def test_show_config_py_ver_impl_constants(tox_project: ToxProjectCreator) -> None: + tox_ini = "[testenv]\npackage=skip\ndeps= {py_impl}{py_dot_ver}" + outcome = tox_project({"tox.ini": tox_ini}).run("c", "-e", "py", "-k", "py_dot_ver", "py_impl", "deps") + outcome.assert_success() + py_ver = ".".join(str(i) for i in sys.version_info[0:2]) + impl = sys.implementation.name + assert outcome.out == f"[testenv:py]\npy_dot_ver = {py_ver}\npy_impl = {impl}\ndeps = {impl}{py_ver}\n" + + def test_show_config_exception(tox_project: ToxProjectCreator) -> None: project = tox_project( {
Different basepython in tox 4 The content of `{basepython}` is different in tox 4; for instance, this breaks requirement files whose names depend on the Python version. AFAICT this does not seem to be clearly expected. ```console ❯ cat tox.ini [testenv] skip_install = True allowlist_externals = echo commands = echo {basepython} ❯ tox -e py310 py310 run-test-pre: PYTHONHASHSEED='1129533865' py310 run-test: commands[0] | echo python3.10 python3.10 _________________________________________ summary _________________________________________ py310: commands succeeded congratulations :) ❯ tox4 r -e py310 py310: commands[0]> echo py310 py310 py310: OK (0.04=setup[0.03]+cmd[0.00] seconds) congratulations :) (0.08 seconds) ```
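The patch resolves this by exposing the dotted version and implementation name as new substitution constants. A sketch of how those values can be derived from the running interpreter, mirroring the `version_dot` and `impl_lower` properties in the patch (the requirements-file name is just an example):

```python
import sys

py_dot_ver = f"{sys.version_info.major}.{sys.version_info.minor}"  # e.g. "3.11"
py_impl = sys.implementation.name.lower()  # e.g. "cpython"

# usable the same way as the tox substitutions, e.g. in a deps line:
print(f"-r {py_impl}{py_dot_ver}-req.txt")
```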
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/session/cmd/test_show_config.py::test_show_config_py_ver_impl_constants" ]
[ "tests/session/cmd/test_show_config.py::test_show_config_ini_comment_path", "tests/session/cmd/test_show_config.py::test_show_config_select_only", "tests/session/cmd/test_show_config.py::test_show_config_pkg_env_skip", "tests/session/cmd/test_show_config.py::test_show_config_alias", "tests/session/cmd/test_show_config.py::test_show_config_timeout_custom", "tests/session/cmd/test_show_config.py::test_show_config_core_host_python", "tests/session/cmd/test_show_config.py::test_show_config_exception", "tests/session/cmd/test_show_config.py::test_show_config_commands", "tests/session/cmd/test_show_config.py::test_show_config_timeout_default", "tests/session/cmd/test_show_config.py::test_pass_env_config_default[True]", "tests/session/cmd/test_show_config.py::test_show_config_filter_keys", "tests/session/cmd/test_show_config.py::test_show_config_description_normalize", "tests/session/cmd/test_show_config.py::test_show_config_default_run_env", "tests/session/cmd/test_show_config.py::test_show_config_help", "tests/session/cmd/test_show_config.py::test_show_config_cli_flag", "tests/session/cmd/test_show_config.py::test_show_config_unused", "tests/session/cmd/test_show_config.py::test_pass_env_config_default[False]", "tests/session/cmd/test_show_config.py::test_show_config_pkg_env_once" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2022-12-14T20:10:01Z"
mit
tox-dev__tox-2907
diff --git a/docs/changelog/2702.bugfix.rst b/docs/changelog/2702.bugfix.rst new file mode 100644 index 00000000..7f3e64ee --- /dev/null +++ b/docs/changelog/2702.bugfix.rst @@ -0,0 +1,1 @@ +Forward ``HOME`` by default - by :user:`gschaffner`. diff --git a/src/tox/tox_env/api.py b/src/tox/tox_env/api.py index 1542a8fc..fcfc9e4b 100644 --- a/src/tox/tox_env/api.py +++ b/src/tox/tox_env/api.py @@ -218,6 +218,7 @@ class ToxEnv(ABC): "CPPFLAGS", # C++ compiler flags "LD_LIBRARY_PATH", # location of libs "LDFLAGS", # linker flags + "HOME", # needed for `os.path.expanduser()` on non-Windows systems ] if sys.stdout.isatty(): # if we're on a interactive shell pass on the TERM env.append("TERM")
tox-dev/tox
4408cff65d72d5662e85da632e74154dfc030ef2
diff --git a/tests/session/cmd/test_show_config.py b/tests/session/cmd/test_show_config.py index 9ef9f300..46a04246 100644 --- a/tests/session/cmd/test_show_config.py +++ b/tests/session/cmd/test_show_config.py @@ -117,7 +117,7 @@ def test_pass_env_config_default(tox_project: ToxProjectCreator, stdout_is_atty: expected = ( ["CC", "CCSHARED", "CFLAGS"] + (["COMSPEC"] if is_win else []) - + ["CPPFLAGS", "CURL_CA_BUNDLE", "CXX", "LANG", "LANGUAGE", "LDFLAGS", "LD_LIBRARY_PATH"] + + ["CPPFLAGS", "CURL_CA_BUNDLE", "CXX", "HOME", "LANG", "LANGUAGE", "LDFLAGS", "LD_LIBRARY_PATH"] + (["MSYSTEM", "NUMBER_OF_PROCESSORS", "PATHEXT"] if is_win else []) + ["PIP_*", "PKG_CONFIG", "PKG_CONFIG_PATH", "PKG_CONFIG_SYSROOT_DIR"] + (["PROCESSOR_ARCHITECTURE"] if is_win else [])
Tox 4 breaks in CI/CD pipelines where the user does not exist ## Issue Starting with Tox 4, tox is failing to run unittests due to an exception in code trying to determine the HOME directory. ## Environment Our environment is using Jenkins with Docker-based declarative pipelines, but for the sake of the bug report, I'll demonstrate the problem with a direct Docker setup. ```console $ git clone https://github.com/psf/requests $ docker run --rm -it -u $UID -v $PWD:/src -w /src -e HOME=/src python:3.7 bash I have no name!@ffa04e72c39f:~$ pip freeze cachetools==5.2.0 chardet==5.1.0 colorama==0.4.6 distlib==0.3.6 filelock==3.8.2 importlib-metadata==5.1.0 packaging==22.0 platformdirs==2.6.0 pluggy==1.0.0 py==1.11.0 pyproject_api==1.2.1 six==1.16.0 tomli==2.0.1 tox==3.27.1 typing_extensions==4.4.0 virtualenv==20.17.1 zipp==3.11.0 ``` ## Output of running tox Provide the output of `tox -rvv`: Moved to the first comment, as it makes the report exceed the maximum size. ## Minimal example If possible, provide a minimal reproducer for the issue: ```console $ git clone https://github.com/psf/requests $ docker run --rm -it -u $UID -v $PWD:/src -w /src -e HOME=/src python:3.7 bash I have no name!@ffa04e72c39f:~$ pip install "tox<4" I have no name!@ffa04e72c39f:~$ .local/bin/tox -e py37 ---> works I have no name!@ffa04e72c39f:~$ pip install -U "tox" I have no name!@ffa04e72c39f:~$ .local/bin/tox -e py37 ---> fails ``` Pinning Tox to <4 does not seem to work for all our packages, but I could at least reproduce our issue with requests, so you can get an early report of the problem. You can see the HOME environment variable is set on the Docker command line, which we do in our Jenkins pipelines as well. We've had a similar issue in the past with running Spark-based unittests, as the JVM is very picky about its environment, but we would really like to avoid applying the same hack to get it to work with tox 4+.
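The accepted fix adds `HOME` to the default pass-through list because `os.path.expanduser` consults it on non-Windows systems; without it, `~` cannot be resolved for a container user that has no passwd entry. A minimal POSIX-only demonstration:

```python
import os

os.environ["HOME"] = "/src"  # e.g. set via `docker run -e HOME=/src`
# on non-Windows systems expanduser reads HOME before falling back to
# the passwd database, which may have no entry for the container user
assert os.path.expanduser("~") == "/src"
```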
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/session/cmd/test_show_config.py::test_pass_env_config_default[True]", "tests/session/cmd/test_show_config.py::test_pass_env_config_default[False]" ]
[ "tests/session/cmd/test_show_config.py::test_show_config_default_run_env", "tests/session/cmd/test_show_config.py::test_show_config_commands", "tests/session/cmd/test_show_config.py::test_show_config_filter_keys", "tests/session/cmd/test_show_config.py::test_show_config_unused", "tests/session/cmd/test_show_config.py::test_show_config_py_ver_impl_constants", "tests/session/cmd/test_show_config.py::test_show_config_exception", "tests/session/cmd/test_show_config.py::test_show_config_empty_install_command_exception", "tests/session/cmd/test_show_config.py::test_show_config_pkg_env_once", "tests/session/cmd/test_show_config.py::test_show_config_pkg_env_skip", "tests/session/cmd/test_show_config.py::test_show_config_select_only", "tests/session/cmd/test_show_config.py::test_show_config_alias", "tests/session/cmd/test_show_config.py::test_show_config_description_normalize", "tests/session/cmd/test_show_config.py::test_show_config_ini_comment_path", "tests/session/cmd/test_show_config.py::test_show_config_cli_flag", "tests/session/cmd/test_show_config.py::test_show_config_timeout_default", "tests/session/cmd/test_show_config.py::test_show_config_timeout_custom", "tests/session/cmd/test_show_config.py::test_show_config_help", "tests/session/cmd/test_show_config.py::test_show_config_core_host_python", "tests/session/cmd/test_show_config.py::test_show_config_matching_env_section", "tests/session/cmd/test_show_config.py::test_package_env_inherits_from_pkgenv" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files" ], "has_test_patch": true, "is_lite": false }
"2023-01-30T09:42:47Z"
mit
tox-dev__tox-3013
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 43128a4a..dac77aba 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -17,7 +17,7 @@ repos: - id: add-trailing-comma args: [--py36-plus] - repo: https://github.com/asottile/pyupgrade - rev: v3.3.1 + rev: v3.4.0 hooks: - id: pyupgrade args: ["--py37-plus"] @@ -52,7 +52,7 @@ repos: hooks: - id: flake8 additional_dependencies: - - flake8-bugbear==23.3.23 + - flake8-bugbear==23.5.9 - flake8-comprehensions==3.12 - flake8-pytest-style==1.7.2 - flake8-spellcheck==0.28 @@ -69,7 +69,7 @@ repos: - "@prettier/[email protected]" args: ["--print-width=120", "--prose-wrap=always"] - repo: https://github.com/igorshubovych/markdownlint-cli - rev: v0.33.0 + rev: v0.34.0 hooks: - id: markdownlint - repo: local diff --git a/docs/changelog/2925.bugfix.rst b/docs/changelog/2925.bugfix.rst new file mode 100644 index 00000000..f74003a9 --- /dev/null +++ b/docs/changelog/2925.bugfix.rst @@ -0,0 +1,1 @@ +Fix ``tox --devenv venv`` invocation without ``-e`` - by :user:`asottile`. diff --git a/pyproject.toml b/pyproject.toml index 87c2ce55..82810864 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ build-backend = "hatchling.build" requires = [ "hatch-vcs>=0.3", - "hatchling>=1.14", + "hatchling>=1.17", ] [project] @@ -51,23 +51,23 @@ dependencies = [ "cachetools>=5.3", "chardet>=5.1", "colorama>=0.4.6", - "filelock>=3.11", - 'importlib-metadata>=6.4.1; python_version < "3.8"', + "filelock>=3.12", + 'importlib-metadata>=6.6; python_version < "3.8"', "packaging>=23.1", - "platformdirs>=3.2", + "platformdirs>=3.5.1", "pluggy>=1", "pyproject-api>=1.5.1", 'tomli>=2.0.1; python_version < "3.11"', - 'typing-extensions>=4.5; python_version < "3.8"', - "virtualenv>=20.21", + 'typing-extensions>=4.6.2; python_version < "3.8"', + "virtualenv>=20.23", ] optional-dependencies.docs = [ - "furo>=2023.3.27", - "sphinx>=6.1.3", + "furo>=2023.5.20", + "sphinx>=7.0.1", "sphinx-argparse-cli>=1.11", "sphinx-autodoc-typehints!=1.23.4,>=1.23", "sphinx-copybutton>=0.5.2", - "sphinx-inline-tabs>=2022.1.2b11", + "sphinx-inline-tabs>=2023.4.21", "sphinxcontrib-towncrier>=0.2.1a0", "towncrier>=22.12", ] @@ -79,12 +79,12 @@ optional-dependencies.testing = [ "distlib>=0.3.6", "flaky>=3.7", "hatch-vcs>=0.3", - "hatchling>=1.14", - "psutil>=5.9.4", + "hatchling>=1.17", + "psutil>=5.9.5", "pytest>=7.3.1", - "pytest-cov>=4", + "pytest-cov>=4.1", "pytest-mock>=3.10", - "pytest-xdist>=3.2.1", + "pytest-xdist>=3.3.1", "re-assert>=1.1", 'time-machine>=2.9; implementation_name != "pypy"', "wheel>=0.40", diff --git a/src/tox/session/cmd/legacy.py b/src/tox/session/cmd/legacy.py index e92c3d27..82938d13 100644 --- a/src/tox/session/cmd/legacy.py +++ b/src/tox/session/cmd/legacy.py @@ -104,6 +104,8 @@ def legacy(state: State) -> int: if option.list_envs or option.list_envs_all: return list_env(state) if option.devenv_path: + if option.env.is_default_list: + option.env = CliEnv(["py"]) option.devenv_path = Path(option.devenv_path) return devenv(state) if option.parallel != 0: # only 0 means sequential diff --git a/tox.ini b/tox.ini index 13810f63..cf0a2b92 100644 --- a/tox.ini +++ b/tox.ini @@ -41,7 +41,7 @@ commands = description = format the code base to adhere to our styles, and complain about what we cannot do automatically skip_install = true deps = - pre-commit>=3.2.2 + pre-commit>=3.3.2 pass_env = {[testenv]passenv} PROGRAMDATA @@ -52,9 +52,9 @@ commands = [testenv:type] description = run type check on code base deps = - mypy==1.2 + mypy==1.3 
types-cachetools>=5.3.0.5 - types-chardet>=5.0.4.3 + types-chardet>=5.0.4.6 commands = mypy src/tox mypy tests
tox-dev/tox
3238abf7e95fa7c5c041554452f4cee055f6c0d7
diff --git a/tests/session/cmd/test_legacy.py b/tests/session/cmd/test_legacy.py index 957149b6..73cc397a 100644 --- a/tests/session/cmd/test_legacy.py +++ b/tests/session/cmd/test_legacy.py @@ -78,14 +78,27 @@ def test_legacy_list_all(tox_project: ToxProjectCreator, mocker: MockerFixture, assert outcome.state.conf.options.show_core is False -def test_legacy_devenv(tox_project: ToxProjectCreator, mocker: MockerFixture, tmp_path: Path) -> None: - devenv = mocker.patch("tox.session.cmd.legacy.devenv") [email protected]( + "args", + [ + pytest.param((), id="empty"), + pytest.param(("-e", "py"), id="select"), + ], +) +def test_legacy_devenv( + tox_project: ToxProjectCreator, + mocker: MockerFixture, + tmp_path: Path, + args: tuple[str, ...], +) -> None: + run_sequential = mocker.patch("tox.session.cmd.devenv.run_sequential") into = tmp_path / "b" - outcome = tox_project({"tox.ini": ""}).run("le", "--devenv", str(into), "-e", "py") + outcome = tox_project({"tox.ini": ""}).run("le", "--devenv", str(into), *args) - assert devenv.call_count == 1 + assert run_sequential.call_count == 1 assert outcome.state.conf.options.devenv_path == into + assert set(outcome.state.conf.options.env) == {"py"} def test_legacy_run_parallel(tox_project: ToxProjectCreator, mocker: MockerFixture) -> None:
"IndexError: list index out of range" when using --devenv flag ## Issue Got following error: ``` tox --devenv venv Traceback (most recent call last): File "/Users/xzk/.local/bin/tox", line 8, in <module> sys.exit(run()) File "/Users/xzk/.local/pipx/venvs/tox/lib/python3.9/site-packages/tox/run.py", line 19, in run result = main(sys.argv[1:] if args is None else args) File "/Users/xzk/.local/pipx/venvs/tox/lib/python3.9/site-packages/tox/run.py", line 45, in main result = handler(state) File "/Users/xzk/.local/pipx/venvs/tox/lib/python3.9/site-packages/tox/session/cmd/legacy.py", line 108, in legacy return devenv(state) File "/Users/xzk/.local/pipx/venvs/tox/lib/python3.9/site-packages/tox/session/cmd/devenv.py", line 38, in devenv state.conf.memory_seed_loaders[list(opt.env)[0]].append(loader) IndexError: list index out of range ``` ``` ╰─ which tox /Users/xzk/.local/bin/tox ``` Tried to execute with `tox` installed in my virtual environment `myenv` ``` which python3 ─╯ /Users/xzk/workplace/my_project/myenv/bin/python3 ``` ```console ╰─ python3 -m tox -vvvv --devenv venv-dev ROOT: 275 D setup logging to NOTSET on pid 20334 [tox/report.py:221] Traceback (most recent call last): File "/Users/xzk/opt/anaconda3/lib/python3.9/runpy.py", line 197, in _run_module_as_main return _run_code(code, main_globals, None, File "/Users/xzk/opt/anaconda3/lib/python3.9/runpy.py", line 87, in _run_code exec(code, run_globals) File "/Users/xzk/workplace/my_project/myenv/lib/python3.9/site-packages/tox/__main__.py", line 6, in <module> run() File "/Users/xzk/workplace/my_project/myenv/lib/python3.9/site-packages/tox/run.py", line 19, in run result = main(sys.argv[1:] if args is None else args) File "/Users/xzk/workplace/my_project/myenv/lib/python3.9/site-packages/tox/run.py", line 45, in main result = handler(state) File "/Users/xzk/workplace/my_project/myenv/lib/python3.9/site-packages/tox/session/cmd/legacy.py", line 108, in legacy return devenv(state) File "/Users/xzk/workplace/my_project/myenv/lib/python3.9/site-packages/tox/session/cmd/devenv.py", line 38, in devenv state.conf.memory_seed_loaders[list(opt.env)[0]].append(loader) IndexError: list index out of range ``` My `tox.ini` ``` [tox] envlist = py39 skip_missing_interpreters = True [testenv] deps = -e.[test] commands = pytest {posargs: ../tests/python_tests} ``` My setup.py ``` # setup.py from setuptools import setup from setuptools import find_packages setup( name='my_pkg', version='0.1.0', python_requires=">=3.8.0", packages=find_packages(), install_requires=["numpy>=1.24.2", "pandas>=1.5.3", "torch>=1.13.1", "tqdm>=4.64.1", "matplotlib>=3.6.3", "scikit-learn>=1.2.1", "pyarrow>=11.0.0"], extras_require=dict( test=[ 'pytest', ] ), ) ``` ## Environment Provide at least: - OS: MacOS Monterry - `pip list` of the host Python where `tox` is installed: ```console ╰─ pip list ─╯ Package Version ----------------- -------- attrs 22.2.0 cachetools 5.3.0 chardet 5.1.0 colorama 0.4.6 contourpy 1.0.7 cycler 0.11.0 distlib 0.3.6 exceptiongroup 1.1.0 filelock 3.9.0 fonttools 4.38.0 iniconfig 2.0.0 joblib 1.2.0 kiwisolver 1.4.4 matplotlib 3.6.3 numpy 1.24.2 packaging 23.0 pandas 1.5.3 Pillow 9.4.0 pip 23.0 platformdirs 3.0.0 pluggy 1.0.0 pyarrow 11.0.0 pyparsing 3.0.9 pyproject_api 1.5.0 pytest 7.2.1 pytest-mock 3.10.0 python-dateutil 2.8.2 pytz 2022.7.1 scikit-learn 1.2.1 scipy 1.10.0 setuptools 58.1.0 six 1.16.0 threadpoolctl 3.1.0 tomli 2.0.1 torch 1.13.1 tox 4.4.5 tqdm 4.64.1 typing_extensions 4.4.0 virtualenv 20.19. 
``` ## Output of running tox Provide the output of `tox -rvv`: Please see above. ## Minimal example If possible, provide a minimal reproducer for the issue: ```console ```
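The patch avoids the reported `IndexError` by substituting a default environment when `--devenv` is used without `-e`. A minimal sketch of that fallback, using a simplified stand-in for tox's `CliEnv` (the real class has more behaviour):

```python
class CliEnv:
    """Simplified stand-in: tracks whether the user passed -e explicitly."""

    def __init__(self, envs=None):
        self.is_default_list = envs is None
        self._envs = envs or []

    def __iter__(self):
        return iter(self._envs)


def resolve_devenv_envs(env: CliEnv) -> CliEnv:
    # without -e the env list is empty, so indexing [0] raised IndexError;
    # fall back to the default "py" environment instead
    return CliEnv(["py"]) if env.is_default_list else env


assert list(resolve_devenv_envs(CliEnv())) == ["py"]
assert list(resolve_devenv_envs(CliEnv(["py39"]))) == ["py39"]
```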
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/session/cmd/test_legacy.py::test_legacy_devenv[empty]" ]
[ "tests/session/cmd/test_legacy.py::test_legacy_show_config", "tests/session/cmd/test_legacy.py::test_legacy_show_config_with_env", "tests/session/cmd/test_legacy.py::test_legacy_list_default[0]", "tests/session/cmd/test_legacy.py::test_legacy_list_default[1]", "tests/session/cmd/test_legacy.py::test_legacy_list_default[2]", "tests/session/cmd/test_legacy.py::test_legacy_list_env_with_empty_or_missing_env_list[missing", "tests/session/cmd/test_legacy.py::test_legacy_list_env_with_empty_or_missing_env_list[empty", "tests/session/cmd/test_legacy.py::test_legacy_list_all[0]", "tests/session/cmd/test_legacy.py::test_legacy_list_all[1]", "tests/session/cmd/test_legacy.py::test_legacy_list_all[2]", "tests/session/cmd/test_legacy.py::test_legacy_devenv[select]", "tests/session/cmd/test_legacy.py::test_legacy_run_parallel", "tests/session/cmd/test_legacy.py::test_legacy_run_sequential", "tests/session/cmd/test_legacy.py::test_legacy_help", "tests/session/cmd/test_legacy.py::test_legacy_cli_flags" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-05-13T17:54:07Z"
mit
tox-dev__tox-3024
diff --git a/docs/changelog/3024.feature.rst b/docs/changelog/3024.feature.rst new file mode 100644 index 00000000..d63fa00d --- /dev/null +++ b/docs/changelog/3024.feature.rst @@ -0,0 +1,2 @@ +Addded ``--list-dependencies`` and ``--no-list-dependencies`` CLI parameters. +If unspecified, defaults to listing when in CI, but not otherwise. diff --git a/src/tox/session/cmd/run/common.py b/src/tox/session/cmd/run/common.py index 49aeb87b..2846f8d9 100644 --- a/src/tox/session/cmd/run/common.py +++ b/src/tox/session/cmd/run/common.py @@ -22,6 +22,7 @@ from tox.session.cmd.run.single import ToxEnvRunResult, run_one from tox.session.state import State from tox.tox_env.api import ToxEnv from tox.tox_env.runner import RunToxEnv +from tox.util.ci import is_ci from tox.util.graph import stable_topological_sort from tox.util.spinner import MISS_DURATION, Spinner @@ -156,6 +157,19 @@ def env_run_create_flags(parser: ArgumentParser, mode: str) -> None: help="if recreate is set do not recreate packaging tox environment(s)", action="store_true", ) + list_deps = parser.add_mutually_exclusive_group() + list_deps.add_argument( + "--list-dependencies", + action="store_true", + default=is_ci(), + help="list the dependencies installed during environment setup", + ) + list_deps.add_argument( + "--no-list-dependencies", + action="store_false", + dest="list_dependencies", + help="never list the dependencies installed during environment setup", + ) if mode not in ("devenv", "config", "depends"): parser.add_argument( "--skip-pkg-install", diff --git a/src/tox/tox_env/python/api.py b/src/tox/tox_env/python/api.py index da9e435f..6e57626e 100644 --- a/src/tox/tox_env/python/api.py +++ b/src/tox/tox_env/python/api.py @@ -15,7 +15,6 @@ from virtualenv.discovery.py_spec import PythonSpec from tox.config.main import Config from tox.tox_env.api import ToxEnv, ToxEnvCreateArgs from tox.tox_env.errors import Fail, Recreate, Skip -from tox.util.ci import is_ci class VersionInfo(NamedTuple): @@ -227,12 +226,11 @@ class Python(ToxEnv, ABC): def _done_with_setup(self) -> None: """called when setup is done""" super()._done_with_setup() - running_in_ci = is_ci() - if self.journal or running_in_ci: + if self.journal or self.options.list_dependencies: outcome = self.installer.installed() if self.journal: self.journal["installed_packages"] = outcome - if running_in_ci: + if self.options.list_dependencies: logging.warning(",".join(outcome)) def python_cache(self) -> dict[str, Any]:
tox-dev/tox
716564208c1853f3256d20bf3c508652f1df70b8
diff --git a/tests/config/cli/test_cli_env_var.py b/tests/config/cli/test_cli_env_var.py index 61dbf962..1d83175e 100644 --- a/tests/config/cli/test_cli_env_var.py +++ b/tests/config/cli/test_cli_env_var.py @@ -10,6 +10,7 @@ from tox.config.loader.api import Override from tox.pytest import CaptureFixture, LogCaptureFixture, MonkeyPatch from tox.session.env_select import CliEnv from tox.session.state import State +from tox.util.ci import is_ci def test_verbose() -> None: @@ -63,6 +64,7 @@ def test_verbose_no_test() -> None: "factors": [], "labels": [], "skip_env": "", + "list_dependencies": is_ci(), } @@ -121,6 +123,7 @@ def test_env_var_exhaustive_parallel_values( "labels": [], "exit_and_dump_after": 0, "skip_env": "", + "list_dependencies": is_ci(), } assert options.parsed.verbosity == 4 assert options.cmd_handlers == core_handlers diff --git a/tests/config/cli/test_cli_ini.py b/tests/config/cli/test_cli_ini.py index c7324e1a..623a56f5 100644 --- a/tests/config/cli/test_cli_ini.py +++ b/tests/config/cli/test_cli_ini.py @@ -19,6 +19,7 @@ from tox.config.source import discover_source from tox.pytest import CaptureFixture, LogCaptureFixture, MonkeyPatch from tox.session.env_select import CliEnv from tox.session.state import State +from tox.util.ci import is_ci @pytest.fixture() @@ -102,6 +103,7 @@ def default_options() -> dict[str, Any]: "labels": [], "exit_and_dump_after": 0, "skip_env": "", + "list_dependencies": is_ci(), } @@ -139,6 +141,7 @@ def test_ini_exhaustive_parallel_values(core_handlers: dict[str, Callable[[State "labels": [], "exit_and_dump_after": 0, "skip_env": "", + "list_dependencies": is_ci(), } assert options.parsed.verbosity == 4 assert options.cmd_handlers == core_handlers diff --git a/tests/tox_env/python/test_python_api.py b/tests/tox_env/python/test_python_api.py index 3348acda..a9ff7522 100644 --- a/tests/tox_env/python/test_python_api.py +++ b/tests/tox_env/python/test_python_api.py @@ -218,9 +218,25 @@ def test_python_set_hash_seed_incorrect(tox_project: ToxProjectCreator) -> None: @pytest.mark.parametrize("in_ci", [True, False]) def test_list_installed_deps(in_ci: bool, tox_project: ToxProjectCreator, mocker: MockerFixture) -> None: - mocker.patch("tox.tox_env.python.api.is_ci", return_value=in_ci) + mocker.patch("tox.session.cmd.run.common.is_ci", return_value=in_ci) result = tox_project({"tox.ini": "[testenv]\nskip_install = true"}).run("r", "-e", "py") if in_ci: assert "pip==" in result.out else: assert "pip==" not in result.out + + [email protected]("list_deps", ["--list-dependencies", "--no-list-dependencies"]) [email protected]("in_ci", [True, False]) +def test_list_installed_deps_explicit_cli( + list_deps: str, + in_ci: bool, + tox_project: ToxProjectCreator, + mocker: MockerFixture, +) -> None: + mocker.patch("tox.session.cmd.run.common.is_ci", return_value=in_ci) + result = tox_project({"tox.ini": "[testenv]\nskip_install = true"}).run(list_deps, "r", "-e", "py") + if list_deps == "--list-dependencies": + assert "pip==" in result.out + else: + assert "pip==" not in result.out
Allow CI runs to disable listing dependencies ## What's the problem this feature will solve? <!-- What are you trying to do, that you are unable to achieve with tox as it currently stands? --> Right now CI runs always list dependencies for each environment. While this is often nice, it can result in excessive output, especially when using tox to run linters and other tools. ## Describe the solution you'd like <!-- Clear and concise description of what you want to happen. --> If `list_dependencies_command` is explicitly set as an empty string for an environment, skip the listing dependencies command. <!-- Provide examples of real world use cases that this would enable and how it solves the problem described above. --> ## Alternative Solutions <!-- Have you tried to workaround the problem using tox or other tools? Or a different approach to solving this issue? Please elaborate here. --> One alternative would be a separate configuration option for an environment to specifically disable listing dependencies. ## Additional context <!-- Add any other context, links, etc. about the feature here. --> snapcraft and several other projects use tox to run our linters. However, when running in CI, it can be difficult to find specific linting failures between all the `pip freeze` lines: ![image](https://github.com/tox-dev/tox/assets/4305943/1a51f38a-74ec-4fac-925d-736db96b4301)
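The accepted patch implements this as a mutually exclusive argparse pair whose shared default comes from a CI check. A self-contained sketch of that pattern; the trivial `is_ci` below (checking only the common `CI` variable) is an assumption, as tox's real detection covers many vendor-specific variables:

```python
import os
from argparse import ArgumentParser


def is_ci() -> bool:
    # simplified assumption: real CI detection checks many vendor variables
    return "CI" in os.environ


parser = ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("--list-dependencies", action="store_true", default=is_ci())
group.add_argument("--no-list-dependencies", action="store_false", dest="list_dependencies")

print(parser.parse_args([]).list_dependencies)  # defaults to the is_ci() result
print(parser.parse_args(["--no-list-dependencies"]).list_dependencies)  # False
```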
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/config/cli/test_cli_env_var.py::test_verbose_no_test", "tests/config/cli/test_cli_env_var.py::test_env_var_exhaustive_parallel_values", "tests/config/cli/test_cli_ini.py::test_ini_empty[[tox]]", "tests/config/cli/test_cli_ini.py::test_ini_empty[]", "tests/config/cli/test_cli_ini.py::test_ini_exhaustive_parallel_values", "tests/config/cli/test_cli_ini.py::test_bad_cli_ini", "tests/config/cli/test_cli_ini.py::test_bad_option_cli_ini", "tests/tox_env/python/test_python_api.py::test_list_installed_deps[True]", "tests/tox_env/python/test_python_api.py::test_list_installed_deps[False]", "tests/tox_env/python/test_python_api.py::test_list_installed_deps_explicit_cli[True---list-dependencies]", "tests/tox_env/python/test_python_api.py::test_list_installed_deps_explicit_cli[True---no-list-dependencies]", "tests/tox_env/python/test_python_api.py::test_list_installed_deps_explicit_cli[False---list-dependencies]", "tests/tox_env/python/test_python_api.py::test_list_installed_deps_explicit_cli[False---no-list-dependencies]" ]
[ "tests/config/cli/test_cli_env_var.py::test_verbose", "tests/config/cli/test_cli_env_var.py::test_verbose_compound", "tests/config/cli/test_cli_env_var.py::test_ini_help", "tests/config/cli/test_cli_env_var.py::test_bad_env_var", "tests/config/cli/test_cli_ini.py::test_ini_help", "tests/config/cli/test_cli_ini.py::test_cli_ini_with_interpolated", "tests/config/cli/test_cli_ini.py::test_conf_arg[ini-dir]", "tests/config/cli/test_cli_ini.py::test_conf_arg[ini]", "tests/config/cli/test_cli_ini.py::test_conf_arg[cfg-dir]", "tests/config/cli/test_cli_ini.py::test_conf_arg[cfg]", "tests/config/cli/test_cli_ini.py::test_conf_arg[toml-dir]", "tests/config/cli/test_cli_ini.py::test_conf_arg[toml]", "tests/tox_env/python/test_python_api.py::test_requirements_txt", "tests/tox_env/python/test_python_api.py::test_conflicting_base_python_factor", "tests/tox_env/python/test_python_api.py::test_diff_msg_added_removed_changed", "tests/tox_env/python/test_python_api.py::test_diff_msg_no_diff", "tests/tox_env/python/test_python_api.py::test_extract_base_python[py3-py3]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[py311-py311]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[py3.12-py3.12]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[pypy2-pypy2]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[rustpython3-rustpython3]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[cpython3.8-cpython3.8]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[ironpython2.7-ironpython2.7]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[functional-py310-py310]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[bar-pypy2-foo-pypy2]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[py-None]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[django-32-None]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[eslint-8.3-None]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[py-310-None]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[py3000-None]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[4.foo-None]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[310-None]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[5-None]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[2000-None]", "tests/tox_env/python/test_python_api.py::test_extract_base_python[4000-None]", "tests/tox_env/python/test_python_api.py::test_base_python_env_no_conflict[magic-pypy-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_no_conflict[magic-pypy-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_no_conflict[magic-py39-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_no_conflict[magic-py39-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[pypy-cpython-pypy-cpython-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[pypy-cpython-pypy-cpython-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[pypy2-pypy3-pypy2-pypy3-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[pypy2-pypy3-pypy2-pypy3-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[py3-py2-py3-py2-True]", 
"tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[py3-py2-py3-py2-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[py38-py39-py38-py39-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[py38-py39-py38-py39-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[py38-py38|py39-py38-py39-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[py38-py38|py39-py38-py39-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[py38-python3-py38-python3-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[py38-python3-py38-python3-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[py310-py38|py39-py310-py38|py39-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[py310-py38|py39-py310-py38|py39-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[py3.11-py310-py3.11-py310-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[py3.11-py310-py3.11-py310-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[py310-magic-py39-py310-py39-True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict[py310-magic-py39-py310-py39-False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict_show_conf[True]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict_show_conf[False]", "tests/tox_env/python/test_python_api.py::test_base_python_env_conflict_show_conf[None]", "tests/tox_env/python/test_python_api.py::test_python_set_hash_seed", "tests/tox_env/python/test_python_api.py::test_python_generate_hash_seed", "tests/tox_env/python/test_python_api.py::test_python_keep_hash_seed", "tests/tox_env/python/test_python_api.py::test_python_disable_hash_seed", "tests/tox_env/python/test_python_api.py::test_python_set_hash_seed_negative", "tests/tox_env/python/test_python_api.py::test_python_set_hash_seed_incorrect" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-06-03T01:18:53Z"
mit
tox-dev__tox-3123
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e78e4b05..c93da511 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,7 +5,7 @@ repos: - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/psf/black - rev: 23.7.0 + rev: 23.9.1 hooks: - id: black - repo: https://github.com/codespell-project/codespell diff --git a/docs/changelog/3084.bugfix.rst b/docs/changelog/3084.bugfix.rst new file mode 100644 index 00000000..687acbd1 --- /dev/null +++ b/docs/changelog/3084.bugfix.rst @@ -0,0 +1,1 @@ +Fix ``quickstart`` command from requiring ``root`` positional argument diff --git a/src/tox/session/cmd/quickstart.py b/src/tox/session/cmd/quickstart.py index 30802399..575a5223 100644 --- a/src/tox/session/cmd/quickstart.py +++ b/src/tox/session/cmd/quickstart.py @@ -27,6 +27,7 @@ def tox_add_option(parser: ToxParser) -> None: "quickstart_root", metavar="root", default=Path().absolute(), + nargs="?", help="folder to create the tox.ini file", type=Path, )
tox-dev/tox
0799354d44a1585b89240d424309faff8f25a7ed
diff --git a/tests/session/cmd/test_quickstart.py b/tests/session/cmd/test_quickstart.py index 6effff29..f4672caf 100644 --- a/tests/session/cmd/test_quickstart.py +++ b/tests/session/cmd/test_quickstart.py @@ -53,3 +53,8 @@ def test_quickstart_refuse(tox_project: ToxProjectCreator) -> None: def test_quickstart_help(tox_project: ToxProjectCreator) -> None: outcome = tox_project({"tox.ini": ""}).run("q", "-h") outcome.assert_success() + + +def test_quickstart_no_args(tox_project: ToxProjectCreator) -> None: + outcome = tox_project({}).run("q") + outcome.assert_success()
tox quickstart: error: the following arguments are required: root ## Issue I ran `tox quickstart` as described in https://tox.wiki/en/latest/user_guide.html and got the error `tox quickstart: error: the following arguments are required: root`. It seems that either the default parameter that should select the current working directory is broken, or the documentation should be updated. ## Environment Provide at least: - OS: Windows 11 <details open> <summary>Output of <code>pip list</code> of the host Python, where <code>tox</code> is installed</summary> ```console Python version: 3.11 Packages: No idea, I installed with pipx ``` </details> Thanks a lot for developing this super useful library!
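The fix makes the `root` positional optional via `nargs="?"`, defaulting to the current directory. A minimal reproduction of the corrected argparse behaviour:

```python
from argparse import ArgumentParser
from pathlib import Path

parser = ArgumentParser()
# nargs="?" lets the positional be omitted, falling back to the default
parser.add_argument("root", nargs="?", default=Path().absolute(), type=Path)

print(parser.parse_args([]).root)  # current directory, no error
print(parser.parse_args(["some/folder"]).root)  # explicit folder still works
```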
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/session/cmd/test_quickstart.py::test_quickstart_no_args" ]
[ "tests/session/cmd/test_quickstart.py::test_quickstart_ok", "tests/session/cmd/test_quickstart.py::test_quickstart_refuse", "tests/session/cmd/test_quickstart.py::test_quickstart_help" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2023-09-11T17:14:10Z"
mit
tox-dev__tox-3159
diff --git a/docs/changelog/3158.bugfix.rst b/docs/changelog/3158.bugfix.rst new file mode 100644 index 00000000..e7391238 --- /dev/null +++ b/docs/changelog/3158.bugfix.rst @@ -0,0 +1,1 @@ +``--parallel-no-spinner`` flag now implies ``--parallel`` diff --git a/docs/user_guide.rst b/docs/user_guide.rst index d08be62a..73a475df 100644 --- a/docs/user_guide.rst +++ b/docs/user_guide.rst @@ -394,8 +394,8 @@ Parallel mode - ``auto`` to limit it to CPU count, - or pass an integer to set that limit. - Parallel mode displays a progress spinner while running tox environments in parallel, and reports outcome of these as - soon as they have been completed with a human readable duration timing attached. This spinner can be disabled via the - ``--parallel-no-spinner`` flag. + soon as they have been completed with a human readable duration timing attached. To run parallelly without the spinner, + you can use the ``--parallel-no-spinner`` flag. - Parallel mode by default shows output only of failed environments and ones marked as :ref:`parallel_show_output` ``=True``. - There's now a concept of dependency between environments (specified via :ref:`depends`), tox will re-order the diff --git a/src/tox/session/cmd/legacy.py b/src/tox/session/cmd/legacy.py index 92a91fcf..a78d8bac 100644 --- a/src/tox/session/cmd/legacy.py +++ b/src/tox/session/cmd/legacy.py @@ -110,7 +110,7 @@ def legacy(state: State) -> int: option.env = CliEnv(["py"]) option.devenv_path = Path(option.devenv_path) return devenv(state) - if option.parallel != 0: # only 0 means sequential + if option.parallel_no_spinner is True or option.parallel != 0: # only 0 means sequential return run_parallel(state) return run_sequential(state) diff --git a/src/tox/session/cmd/run/parallel.py b/src/tox/session/cmd/run/parallel.py index 9b7e2843..d02eb1f0 100644 --- a/src/tox/session/cmd/run/parallel.py +++ b/src/tox/session/cmd/run/parallel.py @@ -74,7 +74,7 @@ def parallel_flags( "--parallel-no-spinner", action="store_true", dest="parallel_no_spinner", - help="do not show the spinner", + help="run tox environments in parallel, but don't show the spinner, implies --parallel", ) @@ -83,7 +83,7 @@ def run_parallel(state: State) -> int: option = state.conf.options return execute( state, - max_workers=option.parallel, + max_workers=None if option.parallel_no_spinner is True else option.parallel, has_spinner=option.parallel_no_spinner is False and option.parallel_live is False, live=option.parallel_live, )
tox-dev/tox
ddb006f80182f40647aa2c2cd4d4928b2e136396
diff --git a/tests/session/cmd/test_parallel.py b/tests/session/cmd/test_parallel.py index 8ab93a78..a546a263 100644 --- a/tests/session/cmd/test_parallel.py +++ b/tests/session/cmd/test_parallel.py @@ -6,9 +6,11 @@ from signal import SIGINT from subprocess import PIPE, Popen from time import sleep from typing import TYPE_CHECKING +from unittest import mock import pytest +from tox.session.cmd.run import parallel from tox.session.cmd.run.parallel import parse_num_processes from tox.tox_env.api import ToxEnv from tox.tox_env.errors import Fail @@ -169,3 +171,28 @@ def test_parallel_requires_arg(tox_project: ToxProjectCreator) -> None: outcome = tox_project({"tox.ini": ""}).run("p", "-p", "-h") outcome.assert_failed() assert "argument -p/--parallel: expected one argument" in outcome.err + + +def test_parallel_no_spinner(tox_project: ToxProjectCreator) -> None: + """Ensure passing `--parallel-no-spinner` implies `--parallel`.""" + with mock.patch.object(parallel, "execute") as mocked: + tox_project({"tox.ini": ""}).run("p", "--parallel-no-spinner") + + mocked.assert_called_once_with( + mock.ANY, + max_workers=None, + has_spinner=False, + live=False, + ) + + +def test_parallel_no_spinner_legacy(tox_project: ToxProjectCreator) -> None: + with mock.patch.object(parallel, "execute") as mocked: + tox_project({"tox.ini": ""}).run("--parallel-no-spinner") + + mocked.assert_called_once_with( + mock.ANY, + max_workers=None, + has_spinner=False, + live=False, + )
`--parallel-no-spinner` should imply `--parallel` ## Issue The flag `--parallel-no-spinner` reads like "the tests should run in parallel, but don't show the spinner". But if you just pass `--parallel-no-spinner` without also passing `--parallel`, it does nothing. I think the presence of this flag implies that we want to run the tests in parallel.
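A hedged, standalone sketch of the dispatch rule the patch introduces (toy function, not tox's real `legacy`/`run_parallel` code): `--parallel-no-spinner` now forces the parallel runner even when `option.parallel` is 0, the sequential sentinel.

```python
def choose_runner(parallel: int, parallel_no_spinner: bool) -> str:
    """Toy version of the patched dispatch: no-spinner implies parallel."""
    if parallel_no_spinner or parallel != 0:
        return "parallel"
    return "sequential"

assert choose_runner(parallel=0, parallel_no_spinner=True) == "parallel"
assert choose_runner(parallel=0, parallel_no_spinner=False) == "sequential"
assert choose_runner(parallel=2, parallel_no_spinner=False) == "parallel"
```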
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/session/cmd/test_parallel.py::test_parallel_no_spinner", "tests/session/cmd/test_parallel.py::test_parallel_no_spinner_legacy" ]
[ "tests/session/cmd/test_parallel.py::test_parse_num_processes_all", "tests/session/cmd/test_parallel.py::test_parse_num_processes_auto", "tests/session/cmd/test_parallel.py::test_parse_num_processes_exact", "tests/session/cmd/test_parallel.py::test_parse_num_processes_not_number", "tests/session/cmd/test_parallel.py::test_parse_num_processes_minus_one", "tests/session/cmd/test_parallel.py::test_parallel_general", "tests/session/cmd/test_parallel.py::test_parallel_run_live_out", "tests/session/cmd/test_parallel.py::test_parallel_show_output_with_pkg", "tests/session/cmd/test_parallel.py::test_keyboard_interrupt", "tests/session/cmd/test_parallel.py::test_parallels_help", "tests/session/cmd/test_parallel.py::test_parallel_legacy_accepts_no_arg", "tests/session/cmd/test_parallel.py::test_parallel_requires_arg" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-11-16T19:18:52Z"
mit
tox-dev__tox-conda-144
diff --git a/tox_conda/plugin.py b/tox_conda/plugin.py index 8cfa2e4..d669718 100644 --- a/tox_conda/plugin.py +++ b/tox_conda/plugin.py @@ -160,14 +160,17 @@ def tox_testenv_create(venv, action): python = get_py_version(venv.envconfig, action) if venv.envconfig.conda_env is not None: + env_path = Path(venv.envconfig.conda_env) # conda env create does not have a --channel argument nor does it take # dependencies specifications (e.g., python=3.8). These must all be specified # in the conda-env.yml file yaml = YAML() - env_file = yaml.load(Path(venv.envconfig.conda_env)) + env_file = yaml.load(env_path) env_file["dependencies"].append(python) - with tempfile.NamedTemporaryFile(suffix=".yaml") as tmp_env: + with tempfile.NamedTemporaryFile( + dir=env_path.parent, prefix="tox_conda_tmp", suffix=".yaml" + ) as tmp_env: yaml.dump(env_file, tmp_env) args = [
tox-dev/tox-conda
041bd82aafe91f575a4877aec40233e4015a3f8f
diff --git a/tests/test_conda_env.py b/tests/test_conda_env.py index 6ef26ef..4435c15 100644 --- a/tests/test_conda_env.py +++ b/tests/test_conda_env.py @@ -281,6 +281,8 @@ def test_conda_env(tmpdir, newconfig, mocksession): with mocksession.newaction(venv.name, "getenv") as action: tox_testenv_create(action=action, venv=venv) + mock_file.assert_called_with(dir=tmpdir, prefix="tox_conda_tmp", suffix=".yaml") + pcalls = mocksession._pcalls assert len(pcalls) >= 1 call = pcalls[-1] @@ -291,8 +293,6 @@ def test_conda_env(tmpdir, newconfig, mocksession): assert call.args[5].startswith("--file") assert cmd[6] == str(mock_file().name) - mock_file.assert_any_call(suffix=".yaml") - yaml = YAML() tmp_env = yaml.load(mock_open_to_string(mock_file)) assert tmp_env["dependencies"][-1].startswith("python=") @@ -338,6 +338,8 @@ def test_conda_env_and_spec(tmpdir, newconfig, mocksession): with mocksession.newaction(venv.name, "getenv") as action: tox_testenv_create(action=action, venv=venv) + mock_file.assert_called_with(dir=tmpdir, prefix="tox_conda_tmp", suffix=".yaml") + pcalls = mocksession._pcalls assert len(pcalls) >= 1 call = pcalls[-1] @@ -348,8 +350,6 @@ def test_conda_env_and_spec(tmpdir, newconfig, mocksession): assert call.args[5].startswith("--file") assert cmd[6] == str(mock_file().name) - mock_file.assert_any_call(suffix=".yaml") - yaml = YAML() tmp_env = yaml.load(mock_open_to_string(mock_file)) assert tmp_env["dependencies"][-1].startswith("python=")
Behavior for relative paths in `environment.yml` changed. Hi, thanks for the package! Unfortunately, the new release broke my setup. The folder structure looks roughly like this: ``` - docs - rtd_environment.yml - tox.ini - src - package ``` I use `rtd_environment.yml` in my `tox.ini` to create the environment for the documentation. The env file contains a pip section to install the package from the repo. ``` ... - dependencies: ... - pip: - -e ../ ``` With v0.10.0 the pip command fails. It says the directory does not contain a `setup.py` or `pyproject.toml`. Everything works with v0.9.2. The issue can be reproduced in this repo (https://github.com/pytask-dev/pytask/tree/e3c1a4cf6935a0c5cf14267acf9b5597261b3a7f) by typing `tox -e sphinx`.
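A minimal sketch of the behavior the patch restores, reproducing the layout above in a scratch directory (hypothetical paths, not the reporter's project): placing the temporary conda file next to the user's env file keeps relative pip entries such as `-e ../` resolving against the expected directory.

```python
import tempfile
from pathlib import Path

# reproduce the reported layout in a scratch directory (hypothetical paths)
project = Path(tempfile.mkdtemp())
(project / "docs").mkdir()
env_path = project / "docs" / "rtd_environment.yml"
env_path.write_text("dependencies:\n  - pip:\n    - -e ../\n")

# the fix: dir=env_path.parent keeps the temp file beside the original, so
# "conda env create --file <tmp>" resolves "-e ../" against docs/, not /tmp
with tempfile.NamedTemporaryFile(
    dir=env_path.parent, prefix="tox_conda_tmp", suffix=".yaml"
) as tmp_env:
    print(tmp_env.name)
```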
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_conda_env.py::test_conda_env", "tests/test_conda_env.py::test_conda_env_and_spec" ]
[ "tests/test_conda_env.py::test_conda_create", "tests/test_conda_env.py::test_install_deps_no_conda", "tests/test_conda_env.py::test_install_conda_deps", "tests/test_conda_env.py::test_install_conda_no_pip", "tests/test_conda_env.py::test_update", "tests/test_conda_env.py::test_conda_spec", "tests/test_conda_env.py::test_empty_conda_spec_and_env", "tests/test_conda_env.py::test_conda_install_args", "tests/test_conda_env.py::test_conda_create_args", "tests/test_conda_env.py::test_verbosity" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
"2022-10-31T10:16:13Z"
mit
tox-dev__tox-conda-161
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 762d974..4592cf4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -18,7 +18,7 @@ repos: args: - --py3-plus - repo: https://github.com/PyCQA/isort - rev: 5.11.4 + rev: 5.12.0 hooks: - id: isort - repo: https://github.com/psf/black diff --git a/tox.ini b/tox.ini index ff58268..16db4ba 100644 --- a/tox.ini +++ b/tox.ini @@ -73,7 +73,7 @@ depends = [testenv:pkg_meta] description = check that the long description is valid -basepython = python3.9 +basepython = python3.10 skip_install = true deps = build>=0.0.4 diff --git a/tox_conda/plugin.py b/tox_conda/plugin.py index d669718..f93631f 100644 --- a/tox_conda/plugin.py +++ b/tox_conda/plugin.py @@ -168,22 +168,27 @@ def tox_testenv_create(venv, action): env_file = yaml.load(env_path) env_file["dependencies"].append(python) - with tempfile.NamedTemporaryFile( - dir=env_path.parent, prefix="tox_conda_tmp", suffix=".yaml" - ) as tmp_env: - yaml.dump(env_file, tmp_env) - - args = [ - venv.envconfig.conda_exe, - "env", - "create", - "-p", - envdir, - "--file", - tmp_env.name, - ] - - _run_conda_process(args, venv, action, basepath) + tmp_env = tempfile.NamedTemporaryFile( + dir=env_path.parent, + prefix="tox_conda_tmp", + suffix=".yaml", + delete=False, + ) + yaml.dump(env_file, tmp_env) + + args = [ + venv.envconfig.conda_exe, + "env", + "create", + "-p", + envdir, + "--file", + tmp_env.name, + ] + tmp_env.close() + _run_conda_process(args, venv, action, basepath) + Path(tmp_env.name).unlink() + else: args = [venv.envconfig.conda_exe, "create", "--yes", "-p", envdir] for channel in venv.envconfig.conda_channels:
tox-dev/tox-conda
099ec510c545581f8ff2efaac8616f232c5cf66a
diff --git a/tests/test_conda_env.py b/tests/test_conda_env.py index 4435c15..1756846 100644 --- a/tests/test_conda_env.py +++ b/tests/test_conda_env.py @@ -1,5 +1,6 @@ import io import os +import pathlib import re from unittest.mock import mock_open, patch @@ -278,10 +279,12 @@ def test_conda_env(tmpdir, newconfig, mocksession): mock_file = mock_open() with patch("tox_conda.plugin.tempfile.NamedTemporaryFile", mock_file): - with mocksession.newaction(venv.name, "getenv") as action: - tox_testenv_create(action=action, venv=venv) + with patch.object(pathlib.Path, "unlink", autospec=True) as mock_unlink: + with mocksession.newaction(venv.name, "getenv") as action: + tox_testenv_create(action=action, venv=venv) + mock_unlink.assert_called_once - mock_file.assert_called_with(dir=tmpdir, prefix="tox_conda_tmp", suffix=".yaml") + mock_file.assert_called_with(dir=tmpdir, prefix="tox_conda_tmp", suffix=".yaml", delete=False) pcalls = mocksession._pcalls assert len(pcalls) >= 1 @@ -335,10 +338,12 @@ def test_conda_env_and_spec(tmpdir, newconfig, mocksession): mock_file = mock_open() with patch("tox_conda.plugin.tempfile.NamedTemporaryFile", mock_file): - with mocksession.newaction(venv.name, "getenv") as action: - tox_testenv_create(action=action, venv=venv) + with patch.object(pathlib.Path, "unlink", autospec=True) as mock_unlink: + with mocksession.newaction(venv.name, "getenv") as action: + tox_testenv_create(action=action, venv=venv) + mock_unlink.assert_called_once - mock_file.assert_called_with(dir=tmpdir, prefix="tox_conda_tmp", suffix=".yaml") + mock_file.assert_called_with(dir=tmpdir, prefix="tox_conda_tmp", suffix=".yaml", delete=False) pcalls = mocksession._pcalls assert len(pcalls) >= 1
Permission error on conda environment temp file when running on Windows I am receiving the following error (in .tox/python/log/python-0.log) when running tox. ```python # Error snippet File "C:\Users\xxx\Anaconda3\lib\site-packages\conda_env\env.py", line 163, in from_file with open(filename, 'rb') as fp: PermissionError: [Errno 13] Permission denied: 'C:\\Users\\yyy\\tox_conda_tmp13zv20n4.yaml' `$ C:\Users\xxx\Anaconda3\Scripts\conda-env-script.py create -p python --file C:\Users\yyy\tox_conda_tmp13zv20n4.yaml` ``` ### Steps to reproduce the error on Windows (Windows 10) #### tox.ini ``` [tox] envlist = py310 skipsdist = True [testenv] conda_env = environment.yaml ``` #### environment.yaml ``` name: tox_environment dependencies: - python=3.10 - pytest=7.1.2 - tox=3.27.1 - tox-conda=0.10.1 ``` #### Run the following commands on Windows ```bash # create conda env conda env create -f environment.yaml conda activate tox_environment # run tox tox ``` ### Suspected cause of error I suspect that the error might be due to the use of tempfile.NamedTemporaryFile in [plugins.py](https://github.com/tox-dev/tox-conda/blob/main/tox_conda/plugin.py#L171). As per the [docs](https://docs.python.org/3/library/tempfile.html#tempfile.NamedTemporaryFile): > That name can be retrieved from the name attribute of the returned file-like object. Whether the name can be used to open the file a second time, while the named temporary file is still open, varies across platforms (it can be so used on Unix; it cannot on Windows) Any help is appreciated, thanks!
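A standalone sketch of the portable pattern the patch adopts (toy code, not the plugin itself): on Windows a `NamedTemporaryFile` cannot be reopened by name while still open, so the file is created with `delete=False`, closed before the subprocess reads it, and unlinked afterwards.

```python
import tempfile
from pathlib import Path

tmp_env = tempfile.NamedTemporaryFile(suffix=".yaml", delete=False)
try:
    tmp_env.write(b"name: tox_environment\n")
    tmp_env.close()  # closed first, so conda can open it by name on Windows
    # the plugin would run "conda env create --file <tmp_env.name>" here
    print(Path(tmp_env.name).read_text())
finally:
    Path(tmp_env.name).unlink()  # matches the patch's explicit cleanup
```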
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_conda_env.py::test_conda_env", "tests/test_conda_env.py::test_conda_env_and_spec" ]
[ "tests/test_conda_env.py::test_conda_create", "tests/test_conda_env.py::test_install_deps_no_conda", "tests/test_conda_env.py::test_install_conda_deps", "tests/test_conda_env.py::test_install_conda_no_pip", "tests/test_conda_env.py::test_update", "tests/test_conda_env.py::test_conda_spec", "tests/test_conda_env.py::test_empty_conda_spec_and_env", "tests/test_conda_env.py::test_conda_install_args", "tests/test_conda_env.py::test_conda_create_args", "tests/test_conda_env.py::test_verbosity" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2023-02-01T09:25:36Z"
mit
tox-dev__tox-docker-19
diff --git a/README.md b/README.md index c99702c..efd4f4f 100644 --- a/README.md +++ b/README.md @@ -31,17 +31,24 @@ your test suite as it runs, as ordinary environment variables: POSTGRES_USER=username POSTGRES_DB=dbname -## Port Mapping +## Host and Port Mapping tox-docker runs docker with the "publish all ports" option. Any port the container exposes will be made available to your test suite via environment -variables of the form `<image-basename>_<exposed-port>_<proto>`. For +variables of the form `<image-basename>_<exposed-port>_<protocol>_PORT`. For instance, for the postgresql container, there will be an environment -variable `POSTGRES_5432_TCP` whose value is the ephemeral port number that -docker has bound the container's port 5432 to. +variable `POSTGRES_5432_TCP_PORT` whose value is the ephemeral port number +that docker has bound the container's port 5432 to. Likewise, exposed UDP ports will have environment variables like -`TELEGRAF_8092_UDP` whose value is the ephemeral port number that docker has -bound. NB! Since it's not possible to check whether UDP port is open it's -just mapping to environment variable without any checks that service up and -running. +`TELEGRAF_8092_UDP_PORT` Since it's not possible to check whether UDP port +is open it's just mapping to environment variable without any checks that +service up and running. + +The host name for each service is also exposed via environment as +`<image-basename>_HOST`, which is `POSTGRES_HOST` and `TELEGRAF_HOST` for +the two examples above. + +*Deprecation Note:* In older versions of tox-docker, the port was exposed as +`<image-basename>-<exposed-port>-<protocol>`. This additional environment +variable is deprecated, but will be supported until tox-docker 2.0. diff --git a/tox.ini b/tox.ini index cafee7d..f20bc70 100644 --- a/tox.ini +++ b/tox.ini @@ -1,11 +1,18 @@ [tox] -envlist = py27 +envlist = integration,registry -[testenv] +[testenv:integration] docker = nginx:1.13-alpine - telegraf:1.8-alpine + ksdn117/tcp-udp-test dockerenv = ENV_VAR=env-var-value deps = pytest commands = py.test [] test_integration.py + +[testenv:registry] +docker = docker.io/library/nginx:1.13-alpine +dockerenv = + ENV_VAR=env-var-value +deps = pytest +commands = py.test [] test_registry.py diff --git a/tox_docker.py b/tox_docker.py index a120a61..8095735 100644 --- a/tox_docker.py +++ b/tox_docker.py @@ -7,6 +7,28 @@ from docker.errors import ImageNotFound import docker as docker_module +def escape_env_var(varname): + """ + Convert a string to a form suitable for use as an environment variable. + + The result will be all uppercase, and will have all invalid characters + replaced by an underscore. 
+ + The result will match the following regex: [a-zA-Z_][a-zA-Z0-9_]* + + Example: + "my.private.registry/cat/image" will become + "MY_PRIVATE_REGISTRY_CAT_IMAGE" + """ + varname = list(varname.upper()) + if not varname[0].isalpha(): + varname[0] = '_' + for i, c in enumerate(varname): + if not c.isalnum() and c != '_': + varname[i] = '_' + return "".join(varname) + + def _newaction(venv, message): try: # tox 3.7 and later @@ -62,20 +84,33 @@ def tox_runtest_pre(venv): conf._docker_containers.append(container) container.reload() + gateway_ip = container.attrs["NetworkSettings"]["Gateway"] or "0.0.0.0" for containerport, hostports in container.attrs["NetworkSettings"]["Ports"].items(): - hostport = None + for spec in hostports: if spec["HostIp"] == "0.0.0.0": hostport = spec["HostPort"] break - - if not hostport: + else: continue - envvar = "{}_{}".format( - name.upper(), - containerport.replace("/", "_").upper(), - ) + envvar = escape_env_var("{}_HOST".format( + name, + )) + venv.envconfig.setenv[envvar] = gateway_ip + + envvar = escape_env_var("{}_{}_PORT".format( + name, + containerport, + )) + venv.envconfig.setenv[envvar] = hostport + + # TODO: remove in 2.0 + _, proto = containerport.split("/") + envvar = escape_env_var("{}_{}".format( + name, + containerport, + )) venv.envconfig.setenv[envvar] = hostport _, proto = containerport.split("/") @@ -88,7 +123,7 @@ def tox_runtest_pre(venv): while (time.time() - start) < 30: try: sock = socket.create_connection( - address=("0.0.0.0", int(hostport)), + address=(gateway_ip, int(hostport)), timeout=0.1, ) except socket.error:
tox-dev/tox-docker
c571732e0c606a1cde123bf6899a7c246ba2e44e
diff --git a/test_integration.py b/test_integration.py index 2c672fe..4a0be70 100644 --- a/test_integration.py +++ b/test_integration.py @@ -1,6 +1,10 @@ import os import unittest -import urllib2 + +try: + from urllib.request import urlopen +except ImportError: + from urllib2 import urlopen class ToxDockerIntegrationTest(unittest.TestCase): @@ -12,13 +16,30 @@ class ToxDockerIntegrationTest(unittest.TestCase): def test_it_sets_automatic_env_vars(self): # the nginx image we use exposes port 80 + self.assertIn("NGINX_HOST", os.environ) self.assertIn("NGINX_80_TCP", os.environ) - # the telegraf image we use exposes UDP port 8092 - self.assertIn("TELEGRAF_8092_UDP", os.environ) + self.assertIn("NGINX_80_TCP_PORT", os.environ) + self.assertEqual( + os.environ["NGINX_80_TCP_PORT"], + os.environ["NGINX_80_TCP"], + ) + + # the test image we use exposes TCP port 1234 and UDP port 5678 + self.assertIn("KSDN117_TCP_UDP_TEST_1234_TCP", os.environ) + self.assertIn("KSDN117_TCP_UDP_TEST_1234_TCP_PORT", os.environ) + self.assertEqual( + os.environ["KSDN117_TCP_UDP_TEST_1234_TCP_PORT"], + os.environ["KSDN117_TCP_UDP_TEST_1234_TCP"], + ) + self.assertIn("KSDN117_TCP_UDP_TEST_5678_UDP_PORT", os.environ) + self.assertEqual( + os.environ["KSDN117_TCP_UDP_TEST_5678_UDP_PORT"], + os.environ["KSDN117_TCP_UDP_TEST_5678_UDP"], + ) def test_it_exposes_the_port(self): # the nginx image we use exposes port 80 - url = "http://127.0.0.1:{port}/".format(port=os.environ["NGINX_80_TCP"]) - response = urllib2.urlopen(url) + url = "http://{host}:{port}/".format(host=os.environ["NGINX_HOST"], port=os.environ["NGINX_80_TCP"]) + response = urlopen(url) self.assertEqual(200, response.getcode()) - self.assertIn("Thank you for using nginx.", response.read()) + self.assertIn("Thank you for using nginx.", str(response.read())) diff --git a/test_registry.py b/test_registry.py new file mode 100644 index 0000000..4884f36 --- /dev/null +++ b/test_registry.py @@ -0,0 +1,18 @@ +import os +import unittest + +from tox_docker import escape_env_var + + +class ToxDockerRegistryTest(unittest.TestCase): + + def test_it_sets_automatic_env_vars(self): + # the nginx image we use exposes port 80 + self.assertIn("DOCKER_IO_LIBRARY_NGINX_HOST", os.environ) + self.assertIn("DOCKER_IO_LIBRARY_NGINX_80_TCP", os.environ) + + def test_escape_env_var(self): + self.assertEqual( + escape_env_var("my.private.registry/cat/image"), + "MY_PRIVATE_REGISTRY_CAT_IMAGE", + )
support for remote docker hosts Docker itself can run on a remote machine, and docker uses the DOCKER_HOST variable to connect to it. Still, based on the fact that no IP address is returned in environment variables, I suppose that tox-docker would not be able to work in this case. This is a serious issue, as docker now fully supports the ssh protocol and a user can easily do `DOCKER_HOST=ssh://root@remote`. ``` File "/Users/ssbarnea/.local/lib/python2.7/site-packages/pluggy/callers.py", line 81, in get_result _reraise(*ex) # noqa File "/Users/ssbarnea/.local/lib/python2.7/site-packages/pluggy/callers.py", line 187, in _multicall res = hook_impl.function(*args) File "/Users/ssbarnea/os/tox-docker/tox_docker.py", line 94, in tox_runtest_pre "Never got answer on port {} from {}".format(containerport, name) Exception: Never got answer on port 8080/tcp from gerritcodereview/gerrit tox -e py27 3.33s user 2.75s system 5% cpu 1:44.68 total ```
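A hedged sketch of the remote-daemon angle, assuming the `docker` Python SDK and a reachable daemon; it mirrors the patch's use of the container's gateway address instead of a hard-coded 0.0.0.0, but is an illustration rather than tox-docker's code.

```python
import docker

client = docker.from_env()  # honors DOCKER_HOST, including ssh:// URLs
container = client.containers.run(
    "nginx:1.13-alpine", detach=True, publish_all_ports=True
)
try:
    container.reload()
    # connect health checks to the gateway address, as the fix does,
    # instead of assuming the daemon runs on the local machine
    gateway_ip = container.attrs["NetworkSettings"]["Gateway"] or "0.0.0.0"
    print(gateway_ip)
finally:
    container.remove(force=True)
```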
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test_registry.py::ToxDockerRegistryTest::test_escape_env_var" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-05-14T13:57:01Z"
bsd-3-clause
treasure-data__td-client-python-21
diff --git a/tdclient/client.py b/tdclient/client.py index 691b225..85c2258 100644 --- a/tdclient/client.py +++ b/tdclient/client.py @@ -527,10 +527,7 @@ class Client(object): [:class:`tdclient.models.Schedule`] """ result = self.api.list_schedules() - def schedule(m): - name,cron,query,database,result_url,timezone,delay,next_time,priority,retry_limit,org_name = m - return models.Schedule(self, name, cron, query, database, result_url, timezone, delay, next_time, priority, retry_limit, org_name) - return [ schedule(m) for m in result ] + return [ models.Schedule(self, m.get("name"), m.get("cron"), m.get("query"), **m) for m in result ] def update_schedule(self, name, params=None): """ diff --git a/tdclient/schedule_api.py b/tdclient/schedule_api.py index 8d9ec3b..02e7106 100644 --- a/tdclient/schedule_api.py +++ b/tdclient/schedule_api.py @@ -50,17 +50,12 @@ class ScheduleAPI(object): self.raise_error("List schedules failed", res, body) js = self.checked_json(body, ["schedules"]) def schedule(m): - name = m.get("name") - cron = m.get("cron") - query = m.get("query") - database = m.get("database") - result_url = m.get("result") - timezone = m.get("timezone", "UTC") - delay = m.get("delay") - next_time = self._parsedate(self.get_or_else(m, "next_time", "1970-01-01T00:00:00Z"), "%Y-%m-%dT%H:%M:%SZ") - priority = m.get("priority") - retry_limit = m.get("retry_limit") - return (name, cron, query, database, result_url, timezone, delay, next_time, priority, retry_limit, None) # same as database + m = dict(m) + if "timezone" not in m: + m["timezone"] = "UTC" + m["created_at"] = self._parsedate(self.get_or_else(m, "created_at", "1970-01-01T00:00:00Z"), "%Y-%m-%dT%H:%M:%SZ") + m["next_time"] = self._parsedate(self.get_or_else(m, "next_time", "1970-01-01T00:00:00Z"), "%Y-%m-%dT%H:%M:%SZ") + return m return [ schedule(m) for m in js["schedules"] ] def update_schedule(self, name, params=None): diff --git a/tdclient/schedule_model.py b/tdclient/schedule_model.py index 104550d..888ae08 100644 --- a/tdclient/schedule_model.py +++ b/tdclient/schedule_model.py @@ -24,19 +24,27 @@ class Schedule(Model): """Schedule on Treasure Data Service """ - def __init__(self, client, name, cron, query, database=None, result_url=None, timezone=None, delay=None, next_time=None, priority=None, retry_limit=None, org_name=None): + def __init__(self, client, name, cron, query, **kwargs): super(Schedule, self).__init__(client) self._name = name self._cron = cron + self._timezone = kwargs.get("timezone") + self._delay = kwargs.get("delay") + self._created_at = kwargs.get("created_at") + self._type = kwargs.get("type") self._query = query - self._database = database - self._result_url = result_url - self._timezone = timezone - self._delay = delay - self._next_time = next_time - self._priority = priority - self._retry_limit = retry_limit - self._org_name = org_name + self._database = kwargs.get("database") + self._user_name = kwargs.get("user_name") + self._priority = kwargs.get("priority") + self._retry_limit = kwargs.get("retry_limit") + if "result_url" in kwargs: + # backward compatibility for td-client-python < 0.6.0 + # TODO: remove this code if not necessary with fixing test + self._result = kwargs.get("result_url") + else: + self._result = kwargs.get("result") + self._next_time = kwargs.get("next_time") + self._org_name = kwargs.get("org_name") @property def name(self): @@ -68,7 +76,7 @@ class Schedule(Model): def result_url(self): """The result output configuration in URL form of a scheduled job """ - return 
self._result_url + return self._result @property def timezone(self): @@ -88,7 +96,10 @@ class Schedule(Model): def priority(self): """The priority of a scheduled job """ - return self._priority + if self._priority in Job.JOB_PRIORITY: + return Job.JOB_PRIORITY[self._priority] + else: + return str(self._priority) @property def retry_limit(self): @@ -111,6 +122,27 @@ class Schedule(Model): """ return self._next_time + @property + def created_at(self): + """ + TODO: add docstring + """ + return self._created_at + + @property + def type(self): + """ + TODO: add docstring + """ + return self._type + + @property + def user_name(self): + """ + TODO: add docstring + """ + return self._user_name + def run(self, time, num=None): """Run a scheduled job """
treasure-data/td-client-python
59f47438514f128cadf945f54cf56d5f311c5338
diff --git a/tdclient/test/schedule_api_test.py b/tdclient/test/schedule_api_test.py index b7f18bb..faca3d1 100644 --- a/tdclient/test/schedule_api_test.py +++ b/tdclient/test/schedule_api_test.py @@ -70,13 +70,54 @@ def test_delete_schedule_success(): def test_list_schedules_success(): td = api.API("APIKEY") - # TODO: should be replaced by wire dump body = b""" { "schedules":[ - {"name":"foo","cron":"* * * * *","query":"SELECT COUNT(1) FROM nasdaq;","database":"sample_datasets","result":"","timezone":"UTC","delay":"","next_time":"","priority":"","retry_limit":""}, - {"name":"bar","cron":"* * * * *","query":"SELECT COUNT(1) FROM nasdaq;","database":"sample_datasets","result":"","timezone":"UTC","delay":"","next_time":"","priority":"","retry_limit":""}, - {"name":"baz","cron":"* * * * *","query":"SELECT COUNT(1) FROM nasdaq;","database":"sample_datasets","result":"","timezone":"UTC","delay":"","next_time":"","priority":"","retry_limit":""} + { + "name": "foo", + "cron": null, + "timezone": "UTC", + "delay": 0, + "created_at": "2016-08-02T17:58:40Z", + "type": "presto", + "query": "SELECT COUNT(1) FROM nasdaq;", + "database": "sample_datasets", + "user_name": "Yuu Yamashita", + "priority": 0, + "retry_limit": 0, + "result": "", + "next_time": null + }, + { + "name": "bar", + "cron": "0 0 * * *", + "timezone": "UTC", + "delay": 0, + "created_at": "2016-08-02T18:01:04Z", + "type": "presto", + "query": "SELECT COUNT(1) FROM nasdaq;", + "database": "sample_datasets", + "user_name": "Kazuki Ota", + "priority": 0, + "retry_limit": 0, + "result": "", + "next_time": "2016-09-24T00:00:00Z" + }, + { + "name": "baz", + "cron": "* * * * *", + "timezone": "UTC", + "delay": 0, + "created_at": "2016-03-02T23:01:59Z", + "type": "hive", + "query": "SELECT COUNT(1) FROM nasdaq;", + "database": "sample_datasets", + "user_name": "Yuu Yamashita", + "priority": 0, + "retry_limit": 0, + "result": "", + "next_time": "2016-07-06T00:00:00Z" + } ] } """ @@ -84,6 +125,22 @@ def test_list_schedules_success(): schedules = td.list_schedules() td.get.assert_called_with("/v3/schedule/list") assert len(schedules) == 3 + next_time = sorted([ schedule.get("next_time") for schedule in schedules if "next_time" in schedule ]) + assert len(next_time) == 3 + assert next_time[2].year == 2016 + assert next_time[2].month == 9 + assert next_time[2].day == 24 + assert next_time[2].hour == 0 + assert next_time[2].minute == 0 + assert next_time[2].second == 0 + created_at = sorted([ schedule.get("created_at") for schedule in schedules if "created_at" in schedule ]) + assert len(created_at) == 3 + assert created_at[2].year == 2016 + assert created_at[2].month == 8 + assert created_at[2].day == 2 + assert created_at[2].hour == 18 + assert created_at[2].minute == 1 + assert created_at[2].second == 4 def test_list_schedules_failure(): td = api.API("APIKEY") @@ -100,13 +157,59 @@ def test_update_schedule_success(): def test_history_success(): td = api.API("APIKEY") - # TODO: should be replaced by wire dump body = b""" { "history": [ - {"job_id":"12345"}, - {"job_id":"67890"} - ] + { + "query": "SELECT COUNT(1) FROM nasdaq;", + "type": "presto", + "priority": 0, + "retry_limit": 0, + "duration": 1, + "status": "success", + "cpu_time": null, + "result_size": 30, + "job_id": "12345", + "created_at": "2016-04-13 05:24:59 UTC", + "updated_at": "2016-04-13 05:25:02 UTC", + "start_at": "2016-04-13 05:25:00 UTC", + "end_at": "2016-04-13 05:25:01 UTC", + "num_records": 1, + "database": "sample_datasets", + "user_name": "Ryuta Kamizono", + "result": "", 
+ "url": "https://console.treasuredata.com/jobs/12345", + "hive_result_schema": "[[\\"_col0\\", \\"bigint\\"]]", + "organization": null, + "scheduled_at": "" + }, + { + "query": "SELECT COUNT(1) FROM nasdaq;", + "type": "presto", + "priority": 0, + "retry_limit": 0, + "duration": 1, + "status": "success", + "cpu_time": null, + "result_size": 30, + "job_id": "67890", + "created_at": "2016-04-13 05:24:59 UTC", + "updated_at": "2016-04-13 05:25:02 UTC", + "start_at": "2016-04-13 05:25:00 UTC", + "end_at": "2016-04-13 05:25:01 UTC", + "num_records": 1, + "database": "sample_datasets", + "user_name": "Ryuta Kamizono", + "result": "", + "url": "https://console.treasuredata.com/jobs/67890", + "hive_result_schema": "[[\\"_col0\\", \\"bigint\\"]]", + "organization": null, + "scheduled_at": "" + } + ], + "count": 2, + "from": 0, + "to": 20 } """ td.get = mock.MagicMock(return_value=make_response(200, body))
Missing created_time and user_name in list_schedules API The Schedule API returns the following for each schedule, but created_time and user_name are missing from the client's parsed output: ``` $ curl -H "AUTHORIZATION: TD1 XXXXX" "http://api.treasuredata.com/v3/schedule/list" ... { "name":"xxx", "cron":null, "timezone":"UTC", "delay":0, "created_at":"2016-08-15T23:03:59Z", "type":"presto", "query":"xxxx", "database":"api_production", "user_name":"YYYY", "priority":0, "retry_limit":0, "result":"", "next_time":null } ``` https://github.com/treasure-data/td-client-python/blob/master/tdclient/schedule_api.py#L52-L63
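A standalone sketch of the parsing change in the patch (toy function with hypothetical sample data): keeping the whole dict from `/v3/schedule/list` instead of extracting a fixed tuple means fields like `created_at` and `user_name` survive.

```python
from datetime import datetime

def parse_schedule(m: dict) -> dict:
    """Toy version of the patched parser: keep every key, normalize a few."""
    m = dict(m)
    m.setdefault("timezone", "UTC")
    for key in ("created_at", "next_time"):
        if m.get(key):
            m[key] = datetime.strptime(m[key], "%Y-%m-%dT%H:%M:%SZ")
    return m

raw = {"name": "xxx", "created_at": "2016-08-15T23:03:59Z",
       "user_name": "YYYY", "next_time": None}
parsed = parse_schedule(raw)
print(parsed["created_at"], parsed["user_name"])  # both fields survive now
```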
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tdclient/test/schedule_api_test.py::test_list_schedules_success" ]
[ "tdclient/test/schedule_api_test.py::test_create_schedule_success", "tdclient/test/schedule_api_test.py::test_create_schedule_without_cron_success", "tdclient/test/schedule_api_test.py::test_delete_schedule_success", "tdclient/test/schedule_api_test.py::test_list_schedules_failure", "tdclient/test/schedule_api_test.py::test_update_schedule_success", "tdclient/test/schedule_api_test.py::test_history_success", "tdclient/test/schedule_api_test.py::test_run_schedule_success" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2016-09-23T08:10:45Z"
apache-2.0
treasure-data__td-client-python-44
diff --git a/tdclient/cursor.py b/tdclient/cursor.py index ef4b159..83cc292 100644 --- a/tdclient/cursor.py +++ b/tdclient/cursor.py @@ -82,15 +82,22 @@ class Cursor(object): return [ (column[0], None, None, None, None, None, None) for column in result_schema ] def fetchone(self): + """ + Fetch the next row of a query result set, returning a single sequence, or `None` when no more data is available. + """ self._check_executed() if self._rownumber < self._rowcount: row = self._rows[self._rownumber] self._rownumber += 1 return row else: - raise errors.InternalError("index out of bound (%d out of %d)" % (self._rownumber, self._rowcount)) + return None def fetchmany(self, size=None): + """ + Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a list of tuples). + An empty sequence is returned when no more rows are available. + """ if size is None: return self.fetchall() else: @@ -103,13 +110,17 @@ class Cursor(object): raise errors.InternalError("index out of bound (%d out of %d)" % (self._rownumber, self._rowcount)) def fetchall(self): + """ + Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). + Note that the cursor's arraysize attribute can affect the performance of this operation. + """ self._check_executed() if self._rownumber < self._rowcount: rows = self._rows[self._rownumber:] self._rownumber = self._rowcount return rows else: - raise errors.InternalError("row index out of bound (%d out of %d)" % (self._rownumber, self._rowcount)) + return [] def nextset(self): raise errors.NotSupportedError
treasure-data/td-client-python
e2d0e830c2b9b8615523d73ee53fb86d98c59c3b
diff --git a/tdclient/test/cursor_test.py b/tdclient/test/cursor_test.py index 24780e7..0c0e368 100644 --- a/tdclient/test/cursor_test.py +++ b/tdclient/test/cursor_test.py @@ -133,8 +133,7 @@ def test_fetchone(): assert td.fetchone() == ["foo", 1] assert td.fetchone() == ["bar", 1] assert td.fetchone() == ["baz", 2] - with pytest.raises(errors.InternalError) as error: - td.fetchone() + assert td.fetchone() == None def test_fetchmany(): td = cursor.Cursor(mock.MagicMock()) @@ -144,8 +143,9 @@ def test_fetchmany(): td._rowcount = len(td._rows) assert td.fetchmany(2) == [["foo", 1], ["bar", 1]] assert td.fetchmany() == [["baz", 2]] + assert td.fetchmany() == [] with pytest.raises(errors.InternalError) as error: - td.fetchmany() + td.fetchmany(1) def test_fetchall(): td = cursor.Cursor(mock.MagicMock()) @@ -154,8 +154,7 @@ def test_fetchall(): td._rownumber = 0 td._rowcount = len(td._rows) assert td.fetchall() == [["foo", 1], ["bar", 1], ["baz", 2]] - with pytest.raises(errors.InternalError) as error: - td.fetchall() + assert td.fetchall() == [] def test_show_job(): td = cursor.Cursor(mock.MagicMock())
[Q] An error occurs when the record count of `read_sql` is 0 I executed the following Python script. ``` pandas.read_sql("SELECT * FROM td_table WHERE 1=0", td) => tdclient.errors.InternalError: row index out of bound (0 out of 0) ``` Is this the intended behavior? In the case of `mysql-connector-python`, the result is as follows. ``` pandas.read_sql("SELECT * FROM mysql_table WHERE 1=0", mysql) => Empty DataFrame ```
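A toy cursor illustrating the PEP 249 semantics the fix adopts (not tdclient's actual class): `fetchone()` returns `None` and `fetchall()` returns an empty sequence when the result set is exhausted, instead of raising, which is what lets `pandas.read_sql` build an empty DataFrame.

```python
class ToyCursor:
    """Minimal DB-API-style cursor with the patched fetch semantics."""

    def __init__(self, rows):
        self._rows = list(rows)
        self._i = 0

    def fetchone(self):
        if self._i < len(self._rows):
            row = self._rows[self._i]
            self._i += 1
            return row
        return None  # PEP 249: None when no more data is available

    def fetchall(self):
        rows, self._i = self._rows[self._i:], len(self._rows)
        return rows  # [] when exhausted, rather than an InternalError

cur = ToyCursor([])
assert cur.fetchone() is None
assert cur.fetchall() == []
```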
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tdclient/test/cursor_test.py::test_fetchone", "tdclient/test/cursor_test.py::test_fetchmany", "tdclient/test/cursor_test.py::test_fetchall" ]
[ "tdclient/test/cursor_test.py::test_cursor", "tdclient/test/cursor_test.py::test_cursor_close", "tdclient/test/cursor_test.py::test_cursor_execute", "tdclient/test/cursor_test.py::test_cursor_execute_format_dict", "tdclient/test/cursor_test.py::test_cursor_execute_format_tuple", "tdclient/test/cursor_test.py::test_cursor_executemany", "tdclient/test/cursor_test.py::test_check_executed", "tdclient/test/cursor_test.py::test_do_execute_success", "tdclient/test/cursor_test.py::test_do_execute_error", "tdclient/test/cursor_test.py::test_do_execute_wait", "tdclient/test/cursor_test.py::test_result_description", "tdclient/test/cursor_test.py::test_show_job", "tdclient/test/cursor_test.py::test_job_status", "tdclient/test/cursor_test.py::test_job_result", "tdclient/test/cursor_test.py::test_cursor_callproc", "tdclient/test/cursor_test.py::test_cursor_nextset", "tdclient/test/cursor_test.py::test_cursor_setinputsizes", "tdclient/test/cursor_test.py::test_cursor_setoutputsize" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2018-06-29T01:06:42Z"
apache-2.0
troycomi__reportseff-21
diff --git a/pyproject.toml b/pyproject.toml index 9d2f914..f2ed2ef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "reportseff" -version = "2.4.2" +version = "2.4.3" description= "Tablular seff output" authors = ["Troy Comi <[email protected]>"] license = "MIT" diff --git a/src/reportseff/job.py b/src/reportseff/job.py index c089426..97c7075 100644 --- a/src/reportseff/job.py +++ b/src/reportseff/job.py @@ -114,7 +114,9 @@ class Job: for k, value in entry.items(): if k not in self.other_entries or not self.other_entries[k]: self.other_entries[k] = value - self.stepmem += parsemem(entry["MaxRSS"]) if "MaxRSS" in entry else 0 + # self.stepmem += parsemem(entry["MaxRSS"]) if "MaxRSS" in entry else 0 + mem = parsemem(entry["MaxRSS"]) if "MaxRSS" in entry else 0 + self.stepmem = max(self.stepmem, mem) def _update_main_job(self, entry: Dict) -> None: """Update properties for the main job.
troycomi/reportseff
f1502769622e9744becca91d034607e8cb183ca5
diff --git a/tests/test_reportseff.py b/tests/test_reportseff.py index 36c40f3..9674186 100644 --- a/tests/test_reportseff.py +++ b/tests/test_reportseff.py @@ -63,7 +63,7 @@ def test_directory_input(mocker, mock_inquirer): "01:27:42", "48.7%", "99.8%", - "47.7%", + "47.6%", ] @@ -220,7 +220,7 @@ def test_simple_job(mocker, mock_inquirer): assert result.exit_code == 0 # remove header output = result.output.split("\n")[1:] - assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.7%"] + assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.6%"] def test_simple_user(mocker, mock_inquirer): @@ -250,8 +250,8 @@ def test_simple_user(mocker, mock_inquirer): assert result.exit_code == 0 # remove header output = result.output.split("\n")[1:] - assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.7%"] - assert output[1].split() == ["25569410", "COMPLETED", "21:14:48", "91.7%", "1.6%"] + assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.6%"] + assert output[1].split() == ["25569410", "COMPLETED", "21:14:48", "91.7%", "1.5%"] def test_simple_partition(mocker, mock_inquirer): @@ -282,8 +282,8 @@ def test_simple_partition(mocker, mock_inquirer): assert result.exit_code == 0 # remove header output = result.output.split("\n")[1:] - assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.7%"] - assert output[1].split() == ["25569410", "COMPLETED", "21:14:48", "91.7%", "1.6%"] + assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.6%"] + assert output[1].split() == ["25569410", "COMPLETED", "21:14:48", "91.7%", "1.5%"] def test_format_add(mocker, mock_inquirer): @@ -336,8 +336,8 @@ def test_since(mocker, mock_inquirer): assert result.exit_code == 0 # remove header output = result.output.split("\n")[1:] - assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.7%"] - assert output[1].split() == ["25569410", "COMPLETED", "21:14:48", "91.7%", "1.6%"] + assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.6%"] + assert output[1].split() == ["25569410", "COMPLETED", "21:14:48", "91.7%", "1.5%"] def test_since_all_users(mocker, mock_inquirer): @@ -372,8 +372,8 @@ def test_since_all_users(mocker, mock_inquirer): assert result.exit_code == 0 # remove header output = result.output.split("\n")[1:] - assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.7%"] - assert output[1].split() == ["25569410", "COMPLETED", "21:14:48", "91.7%", "1.6%"] + assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.6%"] + assert output[1].split() == ["25569410", "COMPLETED", "21:14:48", "91.7%", "1.5%"] mock_sub.assert_called_once_with( args=( @@ -423,8 +423,8 @@ def test_since_all_users_partition(mocker, mock_inquirer): assert result.exit_code == 0 # remove header output = result.output.split("\n")[1:] - assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.7%"] - assert output[1].split() == ["25569410", "COMPLETED", "21:14:48", "91.7%", "1.6%"] + assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.6%"] + assert output[1].split() == ["25569410", "COMPLETED", "21:14:48", "91.7%", "1.5%"] mock_sub.assert_called_once_with( args=( @@ -474,7 +474,7 @@ def test_parsable(mocker, mock_inquirer): # remove header output = result.output.split("\n")[1:] # no color/bold codes and | delimited - assert output[0].split("|") == ["24418435", "COMPLETED", 
"01:27:42", "99.8", "47.7"] + assert output[0].split("|") == ["24418435", "COMPLETED", "01:27:42", "99.8", "47.6"] # other is suppressed by state filter assert output[1].split("|") == ["25569410", "RUNNING", "21:14:48", "---", "---"] @@ -509,7 +509,7 @@ def test_simple_state(mocker, mock_inquirer): assert result.exit_code == 0 # remove header output = result.output.split("\n")[1:] - assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.7%"] + assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.6%"] # other is suppressed by state filter assert output[1].split() == [] @@ -544,7 +544,7 @@ def test_simple_not_state(mocker, mock_inquirer): assert result.exit_code == 0 # remove header output = result.output.split("\n")[1:] - assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.7%"] + assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.6%"] # other is suppressed by state filter assert output[1].split() == [] @@ -582,7 +582,7 @@ def test_invalid_not_state(mocker, mock_inquirer): assert output[0] == "Unknown state CUNNING" assert output[1] == "No valid states provided to exclude" # output 2 is header - assert output[3].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.7%"] + assert output[3].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.6%"] assert output[4].split() == ["25569410", "RUNNING", "21:14:48", "---", "---"] assert output[5].split() == [] @@ -860,3 +860,67 @@ def test_no_systems(mocker, mock_inquirer): # remove header output = result.output.split("\n") assert output[0] == "No supported scheduling systems found!" + + +def test_issue_16(mocker, mock_inquirer): + """Incorrect memory usage for multi-node jobs.""" + mocker.patch("reportseff.console.which", return_value=True) + runner = CliRunner() + sub_result = mocker.MagicMock() + sub_result.returncode = 0 + sub_result.stdout = ( + "|16|07:36:03|65638294|65638294||2|32G|COMPLETED|6-23:59:00|4-23:56:21\n" + "|1|07:36:03|65638294.batch|65638294.batch|1147220K|1||COMPLETED||07:30:20\n" + "|16|07:36:03|65638294.extern|65638294.extern|0|2||COMPLETED||00:00.001\n" + "|15|00:00:11|65638294.0|65638294.0|0|1||COMPLETED||00:11.830\n" + "|15|00:02:15|65638294.1|65638294.1|4455540K|1||COMPLETED||31:09.458\n" + "|15|00:00:10|65638294.2|65638294.2|0|1||COMPLETED||00:00:04\n" + "|15|00:00:08|65638294.3|65638294.3|0|1||COMPLETED||00:09.602\n" + "|15|00:00:07|65638294.4|65638294.4|0|1||COMPLETED||00:56.827\n" + "|15|00:00:06|65638294.5|65638294.5|0|1||COMPLETED||00:03.512\n" + "|15|00:00:08|65638294.6|65638294.6|0|1||COMPLETED||00:08.520\n" + "|15|00:00:13|65638294.7|65638294.7|0|1||COMPLETED||01:02.013\n" + "|15|00:00:02|65638294.8|65638294.8|0|1||COMPLETED||00:03.639\n" + "|15|00:00:06|65638294.9|65638294.9|0|1||COMPLETED||00:08.683\n" + "|15|00:00:08|65638294.10|65638294.10|0|1||COMPLETED||00:57.438\n" + "|15|00:00:06|65638294.11|65638294.11|0|1||COMPLETED||00:03.642\n" + "|15|00:00:09|65638294.12|65638294.12|0|1||COMPLETED||00:10.271\n" + "|15|00:01:24|65638294.13|65638294.13|4149700K|1||COMPLETED||17:18.067\n" + "|15|00:00:01|65638294.14|65638294.14|0|1||COMPLETED||00:03.302\n" + "|15|00:00:10|65638294.15|65638294.15|0|1||COMPLETED||00:14.615\n" + "|15|00:06:45|65638294.16|65638294.16|4748052K|1||COMPLETED||01:36:40\n" + "|15|00:00:10|65638294.17|65638294.17|0|1||COMPLETED||00:03.864\n" + "|15|00:00:09|65638294.18|65638294.18|0|1||COMPLETED||00:48.987\n" + 
"|15|01:32:53|65638294.19|65638294.19|7734356K|1||COMPLETED||23:09:33\n" + "|15|00:00:01|65638294.20|65638294.20|0|1||COMPLETED||00:03.520\n" + "|15|00:00:07|65638294.21|65638294.21|0|1||COMPLETED||00:50.015\n" + "|15|00:55:17|65638294.22|65638294.22|8074500K|1||COMPLETED||13:45:29\n" + "|15|00:00:13|65638294.23|65638294.23|0|1||COMPLETED||00:04.413\n" + "|15|00:00:12|65638294.24|65638294.24|0|1||COMPLETED||00:49.100\n" + "|15|00:57:41|65638294.25|65638294.25|7883152K|1||COMPLETED||14:20:36\n" + "|15|00:00:01|65638294.26|65638294.26|0|1||COMPLETED||00:03.953\n" + "|15|00:00:05|65638294.27|65638294.27|0|1||COMPLETED||00:47.223\n" + "|15|01:00:17|65638294.28|65638294.28|7715752K|1||COMPLETED||14:59:40\n" + "|15|00:00:06|65638294.29|65638294.29|0|1||COMPLETED||00:04.341\n" + "|15|00:00:07|65638294.30|65638294.30|0|1||COMPLETED||00:50.416\n" + "|15|01:22:31|65638294.31|65638294.31|7663264K|1||COMPLETED||20:33:59\n" + "|15|00:00:05|65638294.32|65638294.32|0|1||COMPLETED||00:04.199\n" + "|15|00:00:08|65638294.33|65638294.33|0|1||COMPLETED||00:50.009\n" + "|15|01:32:23|65638294.34|65638294.34|7764884K|1||COMPLETED||23:01:52\n" + "|15|00:00:06|65638294.35|65638294.35|0|1||COMPLETED||00:04.527\n" + ) + mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) + result = runner.invoke(console.main, "--no-color 65638294".split()) + + assert result.exit_code == 0 + # remove header + output = result.output.split("\n")[1:-1] + assert output[0].split() == [ + "65638294", + "COMPLETED", + "07:36:03", + "4.5%", + "98.6%", + "24.1%", + ] + assert len(output) == 1
Memory Efficiency differs between reportseff and seff for multi-node jobs I've just started using reportseff on our university cluster, specifically for the reasons mentioned in your blog post (be good to your scheduler), and overall it's great. However, I've noticed that the memory efficiencies reported by reportseff and seff differ specifically for multi-node jobs. For example: reportseff: 65638294 COMPLETED 07:36:03 4.5% 98.6% 182.8% seff: Job ID: 65638294 State: COMPLETED (exit code 0) Nodes: 2 Cores per node: 8 CPU Utilized: 4-23:56:22 CPU Efficiency: 98.62% of 5-01:36:48 core-walltime Job Wall-clock time: 07:36:03 Memory Utilized: 7.70 GB Memory Efficiency: 24.06% of 32.00 GB I'm relatively new to both reportseff and seff, so I am not sure if this is an underlying issue with reporting on the SLURM side, but any insight into the origin of the difference would be useful for interpreting the results. Thanks!
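A numeric sketch of the aggregation change in the patch, using illustrative per-step MaxRSS values (not the reporter's exact job): summing MaxRSS over every job step double-counts memory, while taking the maximum single step tracks seff's peak-usage figure.

```python
# per-step MaxRSS in KiB, illustrative values for a multi-step job
steps_maxrss_kib = [1_147_220, 4_455_540, 7_734_356, 8_074_500, 7_883_152]

summed = sum(steps_maxrss_kib)  # old behavior: steps stack up toward >100%
peak = max(steps_maxrss_kib)    # patched behavior: peak of any single step

requested_kib = 32 * 1024 * 1024  # the 32.00 GB request from the seff output
print(f"sum: {summed / requested_kib:.1%}, max: {peak / requested_kib:.1%}")
# max lands near seff's 24.06%, while summing inflates the efficiency
```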
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_reportseff.py::test_directory_input", "tests/test_reportseff.py::test_simple_job", "tests/test_reportseff.py::test_simple_user", "tests/test_reportseff.py::test_simple_partition", "tests/test_reportseff.py::test_since", "tests/test_reportseff.py::test_since_all_users", "tests/test_reportseff.py::test_since_all_users_partition", "tests/test_reportseff.py::test_parsable", "tests/test_reportseff.py::test_simple_state", "tests/test_reportseff.py::test_simple_not_state", "tests/test_reportseff.py::test_invalid_not_state", "tests/test_reportseff.py::test_issue_16" ]
[ "tests/test_reportseff.py::test_directory_input_exception", "tests/test_reportseff.py::test_debug_option", "tests/test_reportseff.py::test_process_failure", "tests/test_reportseff.py::test_short_output", "tests/test_reportseff.py::test_long_output", "tests/test_reportseff.py::test_format_add", "tests/test_reportseff.py::test_no_state", "tests/test_reportseff.py::test_array_job_raw_id", "tests/test_reportseff.py::test_array_job_single", "tests/test_reportseff.py::test_array_job_base", "tests/test_reportseff.py::test_sacct_error", "tests/test_reportseff.py::test_empty_sacct", "tests/test_reportseff.py::test_failed_no_mem", "tests/test_reportseff.py::test_canceled_by_other", "tests/test_reportseff.py::test_zero_runtime", "tests/test_reportseff.py::test_no_systems" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2023-01-23T19:24:12Z"
mit
troycomi__reportseff-29
diff --git a/README.md b/README.md index 467c572..cc6c199 100644 --- a/README.md +++ b/README.md @@ -122,7 +122,7 @@ split_ubam_24419972 RUNNING 01:26:26 --- --- Without arguments, reportseff will try to find slurm output files in the current directory. Combine with `watch` to monitor job progress: -`watch -cn 300 reportseff --modified-sort` +`watch -cn 300 reportseff --color --modified-sort` ```txt JobID State Elapsed CPUEff MemEff @@ -214,6 +214,10 @@ directory to check for slurm outputs. - `--since`: Limit results to those occurring after the specified time. Accepts sacct formats and a comma separated list of key/value pairs. To get jobs in the last hour and a half, can pass `h=1,m=30`. +-`--until`: Limit results to those occurring before the specified time. Accepts + sacct formats and a comma separated list of key/value pairs. + Useful in combination with the 'since' option to query a specific range. +- `--partition`: Limit results to a specific partition. - `--node/-n`: Display information for multi-node jobs; requires additional sacct fields from jobstats. - `--node-and-gpu/-g`: Display information for multi-node jobs and GPU information; @@ -248,6 +252,23 @@ you get an error that pip isn't found, look for a python/anaconda/conda module. in an isolated environment. This resolves issues of dependency versions and allows applications to be run from any environment. +### The output has no color with many jobs! + +Click should determine if the output supports color display and react automatically +in a way you expect. Check that your terminal is setup to display colors and +that your pager (probably less) will display color by default. Some commands, +e.g. `watch` aren't handled properly even when invoked to support color. Here +are some useful settings for your `.bashrc`: +``` +# have less display colors by default. Will fix `reportseff` not showing colors +export LESS="-R" +# for watch aliases, include the `--color` option +watch -cn 300 reportseff --color --modified-sort +# ^ ^^^^^^^ +``` +You can always for display of color (or suppress it) with the `--color/--no-color` +options + ### I get an error about broken pipes when chaining to other commands Python will report that the consumer of process output has closed the stream @@ -264,20 +285,6 @@ will likely be absent. Node-level reporting is only shown for jobs which use multiple nodes or GPUs. If you need a list of where jobs were run, you can add `--format +NodeList`. -### My output is garbled with ESC[0m all over, where's the color? - -Those are ANSI color codes. Click will usually strip these if it detects -the consuming process can't display color codes, but `reportseff` defaults -to always display them. If you don't care for color, use the `--no-color` -option. For less, you can set -``` -export LESS="-R" -``` -in your `.bashrc`, or just type `-R` in an active less process. Some versions -of `watch` require the `-c` option to display color, others can't display -colors properly. If you search for `ansi color <tool>` you should get some -solutions. 
- ## Acknowledgments The code for calling sacct and parsing the returning information was taken diff --git a/pyproject.toml b/pyproject.toml index 58ab7e9..1891ee6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "reportseff" -version = "2.7.3" +version = "2.7.4" description= "Tablular seff output" authors = ["Troy Comi <[email protected]>"] license = "MIT" diff --git a/src/reportseff/console.py b/src/reportseff/console.py index 9087594..4803929 100644 --- a/src/reportseff/console.py +++ b/src/reportseff/console.py @@ -21,7 +21,7 @@ from .parameters import ReportseffParameters ) @click.option( "--color/--no-color", - default=True, + default=None, help="Force color output. No color will use click defaults", ) @click.option( diff --git a/src/reportseff/parameters.py b/src/reportseff/parameters.py index 2c12a9e..1f28ad4 100644 --- a/src/reportseff/parameters.py +++ b/src/reportseff/parameters.py @@ -28,7 +28,7 @@ class ReportseffParameters: def __init__( self, jobs: tuple, - color: bool = True, + color: bool, debug: bool = False, format_str: str = "", modified_sort: bool = False,
troycomi/reportseff
18236d7200309e852fd4aa9102b2565fb32d0ebf
diff --git a/tests/test_reportseff.py b/tests/test_reportseff.py index 1605f8a..8a155ae 100644 --- a/tests/test_reportseff.py +++ b/tests/test_reportseff.py @@ -173,10 +173,10 @@ def test_short_output(mocker, mock_inquirer): mocker.patch.object(OutputRenderer, "format_jobs", return_value="output") mock_click = mocker.patch("reportseff.console.click.echo") - result = runner.invoke(console.main, "--no-color 23000233".split()) + result = runner.invoke(console.main, " 23000233".split()) assert result.exit_code == 0 - mock_click.assert_called_once_with("output", color=False) + mock_click.assert_called_once_with("output", color=None) def test_long_output(mocker, mock_inquirer): @@ -192,10 +192,10 @@ def test_long_output(mocker, mock_inquirer): mocker.patch("reportseff.console.len", return_value=21) mocker.patch.object(OutputRenderer, "format_jobs", return_value="output") mock_click = mocker.patch("reportseff.console.click.echo_via_pager") - result = runner.invoke(console.main, "--no-color 23000233".split()) + result = runner.invoke(console.main, " 23000233".split()) assert result.exit_code == 0 - mock_click.assert_called_once_with("output", color=False) + mock_click.assert_called_once_with("output", color=None) def test_simple_job(mocker, mock_inquirer):
Enhancement: default behavior of color Hi, I am wondering if it is possible to change the behavior of the color/no-color option. By default, with more than 20 lines, it will create a pager with color activated because the default of color is True: ``` reportseff --since d=7 ESC[1m JobIDESC[0m ESC[1m State ESC[0m ESC[1m ElapsedESC[0m ESC[1m TimeEff ESC[0m ESC[1m CPUEff ESC[0m ESC[1m MemEff ESC[0m 15113138 ESC[32m COMPLETED ESC[0m 00:37:55 ESC[31m 0.0% ESC[0m ESC[32m 94.6% ESC[0m ESC[31m 0.1% ESC[0m 15116136 ESC[32m COMPLETED ESC[0m 00:37:55 ESC[31m 0.0% ESC[0m ESC[32m 94.7% ESC[0m ESC[31m 0.1% ESC[0m 15119524 ESC[32m COMPLETED ESC[0m 01:37:30 ESC[31m 0.0% ESC[0m 67.3% ESC[31m 0.2% ESC[0m [...] ``` I think reportseff would be easier for a user to use if it left the default value of color to the echo_via_pager function, which is "autodetection". The library would then detect whether it can display color or not. In my example, I modified the main function a bit with the following code: ``` if entries > 20: if args.color : click.echo_via_pager(output,args.color) elif not args.color: click.echo_via_pager(output,args.color) else: click.echo_via_pager(output) else: if args.color : click.echo(output, color=args.color) else: click.echo(output) ``` Here args.color is equal to None (instead of True) if the option is not set. So with this "solution": --> `reportseff --since d=7` will output in color on my shell without needing a pipe. --> `reportseff --since d=7 >output_reportseff` will write to the file without color. --> `reportseff --color --since d=7` will output ANSI color codes on my shell; need to pipe with less -R to be readable. --> `reportseff --color --since d=7 >output_reportseff` will write the ANSI color codes to the file; need to use the option -r to be readable. --> `reportseff --no-color --since d=7` will output without color on my shell. --> `reportseff --no-color --since d=7 >output_reportseff` will write to the file without color. The current behavior: --> `reportseff --since d=7` will output ANSI color codes on my shell; need to pipe with less -R to be readable. --> `reportseff --since d=7 >output_reportseff` will write the ANSI color codes to the file; need to use the option -r to be readable. --> `reportseff --color --since d=7` will output ANSI color codes on my shell; need to pipe with less -R to be readable. --> `reportseff --color --since d=7 >output_reportseff` will write the ANSI color codes to the file; need to use the option -r to be readable. --> `reportseff --no-color --since d=7` will output without color on my shell. --> `reportseff --no-color --since d=7 >output_reportseff` will write to the file without color. So it only changes the default behavior if you don't set the color/no-color option. I have seen what you wrote here https://github.com/troycomi/reportseff#my-output-is-garbled-with-esc0m-all-over-wheres-the-color, but my approach seems more user-friendly to me. What do you think about that? I hope it is understandable; thanks for your tool again!
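A hedged sketch of the resolution (standalone toy, not reportseff's `console` module): with `color=None`, click auto-detects whether the destination supports ANSI codes, so a plain terminal gets color while redirection to a file strips it.

```python
from typing import Optional

import click

def show(output: str, entries: int, color: Optional[bool] = None) -> None:
    """color stays None unless --color/--no-color was passed explicitly."""
    if entries > 20:
        click.echo_via_pager(output, color=color)  # pager, autodetected color
    else:
        click.echo(output, color=color)  # plain echo, autodetected color

show(click.style("COMPLETED", fg="green"), entries=1)
```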
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_reportseff.py::test_short_output", "tests/test_reportseff.py::test_long_output" ]
[ "tests/test_reportseff.py::test_directory_input", "tests/test_reportseff.py::test_directory_input_exception", "tests/test_reportseff.py::test_debug_option", "tests/test_reportseff.py::test_process_failure", "tests/test_reportseff.py::test_simple_job", "tests/test_reportseff.py::test_simple_user", "tests/test_reportseff.py::test_simple_partition", "tests/test_reportseff.py::test_format_add", "tests/test_reportseff.py::test_since", "tests/test_reportseff.py::test_since_all_users", "tests/test_reportseff.py::test_since_all_users_partition", "tests/test_reportseff.py::test_parsable", "tests/test_reportseff.py::test_simple_state", "tests/test_reportseff.py::test_simple_not_state", "tests/test_reportseff.py::test_invalid_not_state", "tests/test_reportseff.py::test_no_state", "tests/test_reportseff.py::test_array_job_raw_id", "tests/test_reportseff.py::test_array_job_single", "tests/test_reportseff.py::test_array_job_base", "tests/test_reportseff.py::test_sacct_error", "tests/test_reportseff.py::test_empty_sacct", "tests/test_reportseff.py::test_failed_no_mem", "tests/test_reportseff.py::test_canceled_by_other", "tests/test_reportseff.py::test_zero_runtime", "tests/test_reportseff.py::test_no_systems", "tests/test_reportseff.py::test_issue_16", "tests/test_reportseff.py::test_energy_reporting", "tests/test_reportseff.py::test_extra_args" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-03-31T18:20:29Z"
mit
troycomi__reportseff-42
diff --git a/pyproject.toml b/pyproject.toml index d536f8b..2b81226 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "reportseff" -version = "2.7.5" +version = "2.7.6" description= "Tablular seff output" authors = ["Troy Comi <[email protected]>"] license = "MIT" diff --git a/src/reportseff/job.py b/src/reportseff/job.py index 5cba205..43750f8 100644 --- a/src/reportseff/job.py +++ b/src/reportseff/job.py @@ -60,7 +60,6 @@ class Job: self.time: Optional[str] = "---" self.time_eff: Union[str, float] = "---" self.cpu: Optional[Union[str, float]] = "---" - self.mem: Union[str, float] = "---" self.state: Optional[str] = None self.mem_eff: Optional[float] = None self.gpu: Optional[float] = None @@ -116,7 +115,8 @@ class Job: if k not in self.other_entries or not self.other_entries[k]: self.other_entries[k] = value mem = parsemem(entry["MaxRSS"]) if "MaxRSS" in entry else 0 - self.stepmem = max(self.stepmem, mem) + tasks = int(entry.get("NTasks", 1)) + self.stepmem = max(self.stepmem, mem * tasks) if "TRESUsageOutAve" in entry: self.energy = max( diff --git a/src/reportseff/output_renderer.py b/src/reportseff/output_renderer.py index 161accd..a5c2da0 100644 --- a/src/reportseff/output_renderer.py +++ b/src/reportseff/output_renderer.py @@ -45,7 +45,7 @@ class OutputRenderer: # values derived from other values, list includes all dependent values self.derived: Dict[str, List] = { "CPUEff": ["TotalCPU", "AllocCPUS", "Elapsed"], - "MemEff": ["REQMEM", "NNodes", "AllocCPUS", "MaxRSS"], + "MemEff": ["REQMEM", "NNodes", "AllocCPUS", "MaxRSS", "NTasks"], "TimeEff": ["Elapsed", "Timelimit"], "GPU": [], "GPUMem": [],
troycomi/reportseff
48acc9601d20ffedaa758fbd21c834982a8298bc
diff --git a/conftest.py b/conftest.py index 44f8a6a..0756750 100644 --- a/conftest.py +++ b/conftest.py @@ -30,6 +30,7 @@ def to_sacct_dict(sacct_line: str) -> dict: "State", "Timelimit", "TotalCPU", + "NTasks", ) return dict(zip(columns, sacct_line.split("|"))) @@ -316,3 +317,90 @@ def multinode_job(): "|36|12-14:16:39|6196869.batch|6196869.batch|33824748K|1|191846Mn|COMPLETED||451-06:00:24" ), ] + + [email protected] +def issue_41(): + """job run on multiple nodes, with multiple tasks.""" + return [ + to_sacct_dict( + "|8|00:00:53|131042|131042||1|16000M|COMPLETED|00:01:00|06:57.815|8" + ), + to_sacct_dict( + "|8|00:00:53|131042.batch|131042.batch|20264K|1||COMPLETED||00:00.034|8" + ), + to_sacct_dict( + "|8|00:00:53|131042.extern|131042.extern|1052K|1||COMPLETED||00:00.001|8" + ), + to_sacct_dict( + "|8|00:00:53|131042.0|131042.0|1947276K|1||COMPLETED||06:57.779|8" + ), + ] + + [email protected] +def console_jobs(): + """collection of sacct outputs for test_reportseff.""" + + # indexed on job id + return { + "25569410_notime": ( + "^|^1^|^21:14:48^|^25569410^|^25569410^|^^|^1^|^1^|^4000Mc^|^" + "COMPLETED^|^19:28:36\n" + "^|^1^|^21:14:49^|^25569410.extern^|^25569410.extern^|^1548K^|^" + "1^|^1^|^4000Mc^|^COMPLETED^|^00:00:00\n" + "^|^1^|^21:14:43^|^25569410.0^|^25569410.0^|^62328K" + "^|^1^|^1^|^4000Mc^|^COMPLETED^|^19:28:36\n" + ), + "24418435_notime": ( + "^|^1^|^01:27:42^|^24418435^|^24418435^|^^|^1^|^1^|^1Gn^|^" + "COMPLETED^|^01:27:29\n" + "^|^1^|^01:27:42^|^24418435.batch^|^24418435.batch^|^499092K^|^" + "1^|^1^|^1Gn^|^COMPLETED^|^01:27:29\n" + "^|^1^|^01:27:42^|^24418435.extern^|^24418435.extern^|^1376K^|^" + "1^|^1^|^1Gn^|^COMPLETED^|^00:00:00\n" + ), + "24418435": ( + "^|^1^|^01:27:42^|^24418435^|^24418435^|^^|^1^|^1^|^1Gn^|^" + "COMPLETED^|^03:00:00^|^01:27:29\n" + "^|^1^|^01:27:42^|^24418435.batch^|^24418435.batch^|^499092K^|^" + "1^|^1^|^1Gn^|^COMPLETED^|^^|^01:27:29\n" + "^|^1^|^01:27:42^|^24418435.extern^|^24418435.extern^|^1376K^|^" + "1^|^1^|^1Gn^|^COMPLETED^|^^|^00:00:00\n" + ), + "23000233": ( + "^|^16^|^00:00:00^|^23000233^|^23000233^|^^|^1^|^1^|^4000Mc^|^" + "CANCELLED by 129319^|^6-00:00:00^|^00:00:00\n" + ), + "24221219": ( + "^|^1^|^00:09:34^|^24220929_421^|^24221219^|^^|^1^|^1^|^16000Mn^|^" + "COMPLETED^|^09:28.052\n" + "^|^1^|^00:09:34^|^24220929_421.batch^|^24221219.batch" + "^|^5664932K^|^1^|^1^|^16000Mn^|^COMPLETED^|^09:28.051\n" + "^|^1^|^00:09:34^|^24220929_421.extern^|^24221219.extern" + "^|^1404K^|^1^|^1^|^16000Mn^|^COMPLETED^|^00:00:00\n" + ), + "24221220": ( + "^|^1^|^00:09:33^|^24220929_431^|^24221220^|^^|^1^|^1^|^16000Mn^|^" + "PENDING^|^09:27.460\n" + "^|^1^|^00:09:33^|^24220929_431.batch^|^24221220.batch" + "^|^5518572K^|^1^|^1^|^16000Mn^|^PENDING^|^09:27.459\n" + "^|^1^|^00:09:33^|^24220929_431.extern^|^24221220.extern" + "^|^1400K^|^1^|^1^|^16000Mn^|^PENDING^|^00:00:00\n" + ), + "23000381": ( + "^|^8^|^00:00:12^|^23000381^|^23000381^|^^|^1^|^1^|^4000Mc^|^FAILED^|^00:00:00\n" + "^|^8^|^00:00:12^|^23000381.batch^|^23000381.batch^|^^|^1^|^1^|^4000Mc^|^" + "FAILED^|^00:00:00\n" + "^|^8^|^00:00:12^|^23000381.extern^|^23000381.extern^|^1592K^|^1^|^1^|^4000Mc^|^" + "COMPLETED^|^00:00:00\n" + ), + "23000210": ( + "^|^8^|^00:00:00^|^23000210^|^23000210^|^^|^1^|^1^|^20000Mn^|^" + "FAILED^|^00:00.007\n" + "^|^8^|^00:00:00^|^23000210.batch^|^23000210.batch^|^1988K^|^1^|^1^|^20000Mn^|^" + "FAILED^|^00:00.006\n" + "^|^8^|^00:00:00^|^23000210.extern^|^23000210.extern^|^1556K^|^1^|^1^|^20000Mn^|^" + "COMPLETED^|^00:00:00\n" + ), + } diff --git 
a/tests/test_job.py b/tests/test_job.py index bb09c5e..7f6eb46 100644 --- a/tests/test_job.py +++ b/tests/test_job.py @@ -40,7 +40,6 @@ def test_job_init(job): assert job.totalmem is None assert job.time == "---" assert job.cpu == "---" - assert job.mem == "---" assert job.state is None @@ -250,7 +249,7 @@ def test_update_part_job(): "Elapsed": "00:10:00", "MaxRSS": "495644K", "NNodes": "1", - "NTasks": "", + "NTasks": "1", } ) assert job.state is None @@ -765,3 +764,16 @@ def test_multinode_job(multinode_job): job.update(line) assert job.cpu == 5.0 + + +def test_multinode_job_issue_41(issue_41): + """Testing issue 41 where multiple tasks are used. + + Previously reported incorrect memory efficiency. + """ + job = job_module.Job("131042", "131042", None) + for line in issue_41: + job.update(line) + + assert job.cpu == 98.3 + assert job.get_entry("MemEff") == 95.1 diff --git a/tests/test_output_renderer.py b/tests/test_output_renderer.py index 763fe65..09896a6 100644 --- a/tests/test_output_renderer.py +++ b/tests/test_output_renderer.py @@ -175,7 +175,7 @@ def test_renderer_init(renderer): assert sorted(renderer.query_columns) == sorted( ( "JobID JobIDRaw State Elapsed TotalCPU " - "AllocCPUS REQMEM NNodes MaxRSS AdminComment" + "AllocCPUS REQMEM NNodes NTasks MaxRSS AdminComment" ).split() ) @@ -277,7 +277,7 @@ def test_renderer_correct_columns(renderer): ( "JobID TotalCPU Elapsed REQMEM" " JobIDRaw State AdminComment" - " NNodes AllocCPUS MaxRSS Timelimit" + " NNodes NTasks AllocCPUS MaxRSS Timelimit" ).split() ) diff --git a/tests/test_reportseff.py b/tests/test_reportseff.py index a3a44d8..50a1f15 100644 --- a/tests/test_reportseff.py +++ b/tests/test_reportseff.py @@ -30,20 +30,13 @@ def mock_inquirer(mocker): ) -def test_directory_input(mocker, mock_inquirer): +def test_directory_input(mocker, mock_inquirer, console_jobs): """Able to get jobs from directory calls.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^1^|^01:27:42^|^24418435^|^24418435^|^^|^1^|^1Gn^|^" - "COMPLETED^|^03:00:00^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.batch^|^24418435.batch^|^499092K^|^1^|^1Gn^|^" - "COMPLETED^|^^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.extern^|^24418435.extern^|^1376K^|^1^|^1Gn^|^" - "COMPLETED^|^^|^00:00:00\n" - ) + sub_result.stdout = console_jobs["24418435"] mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) def set_jobs(self, directory): @@ -68,20 +61,13 @@ def test_directory_input(mocker, mock_inquirer): ] -def test_directory_input_exception(mocker, mock_inquirer): +def test_directory_input_exception(mocker, mock_inquirer, console_jobs): """Catch exceptions in setting jobs from directory.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "24418435^|^24418435^|^COMPLETED^|^1^|^" - "01:27:29^|^01:27:42^|^03:00:00^|^1Gn^|^^|^1^|^\n" - "24418435.batch^|^24418435.batch^|^COMPLETED^|^1^|^" - "01:27:29^|^01:27:42^|^^|^1Gn^|^499092K^|^1^|^1\n" - "24418435.extern^|^24418435.extern^|^COMPLETED^|^1^|^" - "00:00:00^|^01:27:42^|^^|^1Gn^|^1376K^|^1^|^1\n" - ) + sub_result.stdout = console_jobs["24418435"] mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) def set_jobs(self, directory): @@ -94,16 +80,13 @@ def test_directory_input_exception(mocker, mock_inquirer): assert "Testing EXCEPTION" in 
result.output -def test_debug_option(mocker, mock_inquirer): +def test_debug_option(mocker, mock_inquirer, console_jobs): """Setting debug prints subprocess result.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^16^|^00:00:00^|^23000233^|^23000233^|^^|^1^|^4000Mc^|^" - "CANCELLED by 129319^|^6-00:00:00^|^00:00:00\n" - ) + sub_result.stdout = console_jobs["23000233"] mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke( console.main, @@ -113,10 +96,7 @@ def test_debug_option(mocker, mock_inquirer): assert result.exit_code == 0 # remove header output = result.output.split("\n") - assert output[0] == ( - "^|^16^|^00:00:00^|^23000233^|^23000233^|^^|^1^|^4000Mc^|^" - "CANCELLED by 129319^|^6-00:00:00^|^00:00:00" - ) + assert output[0] == console_jobs["23000233"].strip("\n") assert output[3].split() == [ "23000233", "CANCELLED", @@ -127,16 +107,13 @@ def test_debug_option(mocker, mock_inquirer): ] -def test_process_failure(mocker, mock_inquirer): +def test_process_failure(mocker, mock_inquirer, console_jobs): """Catch exceptions in process_entry by printing the offending entry.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^16^|^00:00:00^|^23000233^|^23000233^|^^|^1^|^4000Mc^|^" - "CANCELLED by 129319^|^6-00:00:00^|^00:00:00\n" - ) + sub_result.stdout = console_jobs["23000233"] mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) mocker.patch.object( JobCollection, "process_entry", side_effect=Exception("TESTING") @@ -153,21 +130,18 @@ def test_process_failure(mocker, mock_inquirer): "{'AdminComment': '', 'AllocCPUS': '16', " "'Elapsed': '00:00:00', 'JobID': '23000233', " "'JobIDRaw': '23000233', 'MaxRSS': '', 'NNodes': '1', " - "'REQMEM': '4000Mc', 'State': 'CANCELLED by 129319', " + "'NTasks': '1', 'REQMEM': '4000Mc', 'State': 'CANCELLED by 129319', " "'TotalCPU': '6-00:00:00'}" ) -def test_short_output(mocker, mock_inquirer): +def test_short_output(mocker, mock_inquirer, console_jobs): """Outputs with 20 or fewer entries are directly printed.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^23000233^|^23000233^|^CANCELLED by 129319^|^16^|^" - "00:00:00^|^00:00:00^|^6-00:00:00^|^4000Mc^|^^|^1^|^\n" - ) + sub_result.stdout = console_jobs["23000233"] mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) mocker.patch("reportseff.console.len", return_value=20) mocker.patch.object(OutputRenderer, "format_jobs", return_value="output") @@ -179,16 +153,13 @@ def test_short_output(mocker, mock_inquirer): mock_click.assert_called_once_with("output", color=None) -def test_long_output(mocker, mock_inquirer): +def test_long_output(mocker, mock_inquirer, console_jobs): """Outputs with more than 20 entries are echoed via pager.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^16^|^00:00:00^|^23000233^|^23000233" - "^|^^|^1^|^4000Mc^|^CANCELLED by 129319^|^00:00:00\n" - ) + sub_result.stdout = console_jobs["23000233"] mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) 
mocker.patch("reportseff.console.len", return_value=21) mocker.patch.object(OutputRenderer, "format_jobs", return_value="output") @@ -199,20 +170,13 @@ def test_long_output(mocker, mock_inquirer): mock_click.assert_called_once_with("output", color=None) -def test_simple_job(mocker, mock_inquirer): +def test_simple_job(mocker, mock_inquirer, console_jobs): """Can get efficiency from a single job.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^1^|^01:27:42^|^24418435^|^24418435^|^^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.batch^|^24418435.batch^|^499092K^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.extern^|^24418435.extern^|^1376K^|^1^|^1Gn^|^" - "COMPLETED^|^00:00:00\n" - ) + sub_result.stdout = console_jobs["24418435_notime"] mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke( console.main, @@ -225,24 +189,14 @@ def test_simple_job(mocker, mock_inquirer): assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.6%"] -def test_simple_user(mocker, mock_inquirer): +def test_simple_user(mocker, mock_inquirer, console_jobs): """Can limit outputs by user.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 sub_result.stdout = ( - "^|^1^|^01:27:42^|^24418435^|^24418435^|^^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.batch^|^24418435.batch^|^499092K^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.extern^|^24418435.extern^|^1376K^|^1^|^1Gn^|^" - "COMPLETED^|^00:00:00\n" - "^|^1^|^21:14:48^|^25569410^|^25569410^|^^|^1^|^4000Mc^|^COMPLETED^|^19:28:36\n" - "^|^1^|^21:14:49^|^25569410.extern^|^25569410.extern^|^1548K^|^1^|^4000Mc^|^" - "COMPLETED^|^00:00:00\n" - "^|^1^|^21:14:43^|^25569410.0^|^25569410.0^|^62328K" - "^|^1^|^4000Mc^|^COMPLETED^|^19:28:36\n" + console_jobs["24418435_notime"] + console_jobs["25569410_notime"] ) mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke( @@ -257,24 +211,14 @@ def test_simple_user(mocker, mock_inquirer): assert output[1].split() == ["25569410", "COMPLETED", "21:14:48", "91.7%", "1.5%"] -def test_simple_partition(mocker, mock_inquirer): +def test_simple_partition(mocker, mock_inquirer, console_jobs): """Can limit outputs by partition.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 sub_result.stdout = ( - "^|^1^|^01:27:42^|^24418435^|^24418435^|^^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.batch^|^24418435.batch^|^499092K^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.extern^|^24418435.extern^|^1376K^|^1^|^1Gn^|^" - "COMPLETED^|^00:00:00\n" - "^|^1^|^21:14:48^|^25569410^|^25569410^|^^|^1^|^4000Mc^|^COMPLETED^|^19:28:36\n" - "^|^1^|^21:14:49^|^25569410.extern^|^25569410.extern^|^1548K^|^1^|^4000Mc^|^" - "COMPLETED^|^00:00:00\n" - "^|^1^|^21:14:43^|^25569410.0^|^25569410.0" - "^|^62328K^|^1^|^4000Mc^|^COMPLETED^|^19:28:36\n" + console_jobs["24418435_notime"] + console_jobs["25569410_notime"] ) mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke( @@ -310,24 +254,14 @@ def test_format_add(mocker, mock_inquirer): ) -def test_since(mocker, 
mock_inquirer): +def test_since(mocker, mock_inquirer, console_jobs): """Can limit outputs by time since argument.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 sub_result.stdout = ( - "^|^1^|^01:27:42^|^24418435^|^24418435^|^^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.batch^|^24418435.batch^|^499092K^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.extern^|^24418435.extern^|^1376K^|^1^|^1Gn^|^" - "COMPLETED^|^00:00:00\n" - "^|^1^|^21:14:48^|^25569410^|^25569410^|^^|^1^|^4000Mc^|^COMPLETED^|^19:28:36\n" - "^|^1^|^21:14:49^|^25569410.extern^|^25569410.extern^|^1548K^|^1^|^4000Mc^|^" - "COMPLETED^|^00:00:00\n" - "^|^1^|^21:14:43^|^25569410.0^|^25569410.0^|^62328K" - "^|^1^|^4000Mc^|^COMPLETED^|^19:28:36\n" + console_jobs["24418435_notime"] + console_jobs["25569410_notime"] ) mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke( @@ -345,24 +279,14 @@ def test_since(mocker, mock_inquirer): assert output[1].split() == ["25569410", "COMPLETED", "21:14:48", "91.7%", "1.5%"] -def test_since_all_users(mocker, mock_inquirer): +def test_since_all_users(mocker, mock_inquirer, console_jobs): """Can limit outputs by time since argument.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 sub_result.stdout = ( - "^|^1^|^01:27:42^|^24418435^|^24418435^|^^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.batch^|^24418435.batch^|^499092K^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.extern^|^24418435.extern^|^1376K^|^1^|^1Gn^|^" - "COMPLETED^|^00:00:00\n" - "^|^1^|^21:14:48^|^25569410^|^25569410^|^^|^1^|^4000Mc^|^COMPLETED^|^19:28:36\n" - "^|^1^|^21:14:49^|^25569410.extern^|^25569410.extern^|^1548K^|^1^|^4000Mc^|^" - "COMPLETED^|^00:00:00\n" - "^|^1^|^21:14:43^|^25569410.0^|^25569410.0^|^62328K" - "^|^1^|^4000Mc^|^COMPLETED^|^19:28:36\n" + console_jobs["24418435_notime"] + console_jobs["25569410_notime"] ) mock_sub = mocker.patch( "reportseff.db_inquirer.subprocess.run", return_value=sub_result @@ -385,7 +309,7 @@ def test_since_all_users(mocker, mock_inquirer): args=( "sacct -P -n --delimiter=^|^ " "--format=AdminComment,AllocCPUS,Elapsed,JobID,JobIDRaw," - "MaxRSS,NNodes,REQMEM,State,TotalCPU " + "MaxRSS,NNodes,NTasks,REQMEM,State,TotalCPU " "--allusers " # all users is added since no jobs/files were specified "--starttime=200406" ).split(), @@ -397,24 +321,14 @@ def test_since_all_users(mocker, mock_inquirer): ) -def test_since_all_users_partition(mocker, mock_inquirer): +def test_since_all_users_partition(mocker, mock_inquirer, console_jobs): """Can limit outputs by time since and partition argument.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 sub_result.stdout = ( - "^|^1^|^01:27:42^|^24418435^|^24418435^|^^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.batch^|^24418435.batch^|^499092K^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.extern^|^24418435.extern^|^1376K^|^1^|^1Gn^|^" - "COMPLETED^|^00:00:00\n" - "^|^1^|^21:14:48^|^25569410^|^25569410^|^^|^1^|^4000Mc^|^COMPLETED^|^19:28:36\n" - "^|^1^|^21:14:49^|^25569410.extern^|^25569410.extern^|^1548K^|^1^|^4000Mc^|^" - "COMPLETED^|^00:00:00\n" - 
"^|^1^|^21:14:43^|^25569410.0^|^25569410.0^|^62328K" - "^|^1^|^4000Mc^|^COMPLETED^|^19:28:36\n" + console_jobs["24418435_notime"] + console_jobs["25569410_notime"] ) mock_sub = mocker.patch( "reportseff.db_inquirer.subprocess.run", return_value=sub_result @@ -437,7 +351,7 @@ def test_since_all_users_partition(mocker, mock_inquirer): args=( "sacct -P -n --delimiter=^|^ " "--format=AdminComment,AllocCPUS,Elapsed,JobID,JobIDRaw," - "MaxRSS,NNodes,REQMEM,State,TotalCPU " + "MaxRSS,NNodes,NTasks,REQMEM,State,TotalCPU " "--allusers " # all users is added since no jobs/files were specified "--starttime=200406 " "--partition=partition " @@ -450,25 +364,15 @@ def test_since_all_users_partition(mocker, mock_inquirer): ) -def test_parsable(mocker, mock_inquirer): +def test_parsable(mocker, mock_inquirer, console_jobs): """Can display output as parsable format.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^1^|^01:27:42^|^24418435^|^24418435^|^^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.batch^|^24418435.batch^|^499092K^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.extern^|^24418435.extern^|^1376K^|^1^|^1Gn^|^" - "COMPLETED^|^00:00:00\n" - "^|^1^|^21:14:48^|^25569410^|^25569410^|^^|^1^|^4000Mc^|^RUNNING^|^19:28:36\n" - "^|^1^|^21:14:49^|^25569410.extern^|^25569410.extern^|^1548K^|^1^|^4000Mc^|^" - "RUNNING^|^00:00:00\n" - "^|^1^|^21:14:43^|^25569410.0^|^25569410.0^|^62328K" - "^|^1^|^4000Mc^|^RUNNING^|^19:28:36\n" - ) + sub_result.stdout = console_jobs["24418435_notime"] + console_jobs[ + "25569410_notime" + ].replace("COMPLETED", "RUNNING") mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke( console.main, @@ -483,29 +387,18 @@ def test_parsable(mocker, mock_inquirer): output = result.output.split("\n")[1:] # no color/bold codes and ^|^ delimited assert output[0].split("|") == ["24418435", "COMPLETED", "01:27:42", "99.8", "47.6"] - # other is suppressed by state filter assert output[1].split("|") == ["25569410", "RUNNING", "21:14:48", "---", "---"] -def test_simple_state(mocker, mock_inquirer): +def test_simple_state(mocker, mock_inquirer, console_jobs): """Can limit outputs by filtering state.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^1^|^01:27:42^|^24418435^|^24418435^|^^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.batch^|^24418435.batch^|^499092K^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.extern^|^24418435.extern^|^1376K^|^1^|^1Gn^|^" - "COMPLETED^|^00:00:00\n" - "^|^1^|^21:14:48^|^25569410^|^25569410^|^^|^1^|^4000Mc^|^RUNNING^|^19:28:36\n" - "^|^1^|^21:14:49^|^25569410.extern^|^25569410.extern^|^1548K^|^1^|^4000Mc^|^" - "RUNNING^|^00:00:00\n" - "^|^1^|^21:14:43^|^25569410.0^|^25569410.0^|^62328K" - "^|^1^|^4000Mc^|^RUNNING^|^19:28:36\n" - ) + sub_result.stdout = console_jobs["24418435_notime"] + console_jobs[ + "25569410_notime" + ].replace("COMPLETED", "RUNNING") mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke( console.main, @@ -523,25 +416,15 @@ def test_simple_state(mocker, mock_inquirer): assert output[1].split() == [] -def test_simple_not_state(mocker, mock_inquirer): +def test_simple_not_state(mocker, mock_inquirer, console_jobs): 
"""Can limit outputs by removing state.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^1^|^01:27:42^|^24418435^|^24418435^|^^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.batch^|^24418435.batch^|^499092K^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.extern^|^24418435.extern^|^1376K^|^1^|^1Gn^|^" - "COMPLETED^|^00:00:00\n" - "^|^1^|^21:14:48^|^25569410^|^25569410^|^^|^1^|^4000Mc^|^RUNNING^|^19:28:36\n" - "^|^1^|^21:14:49^|^25569410.extern^|^25569410.extern^|^1548K^|^1^|^4000Mc^|^" - "RUNNING^|^00:00:00\n" - "^|^1^|^21:14:43^|^25569410.0^|^25569410.0^|^62328K" - "^|^1^|^4000Mc^|^RUNNING^|^19:28:36\n" - ) + sub_result.stdout = console_jobs["24418435_notime"] + console_jobs[ + "25569410_notime" + ].replace("COMPLETED", "RUNNING") mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke( console.main, @@ -559,25 +442,15 @@ def test_simple_not_state(mocker, mock_inquirer): assert output[1].split() == [] -def test_invalid_not_state(mocker, mock_inquirer): +def test_invalid_not_state(mocker, mock_inquirer, console_jobs): """When not state isn't found, return all jobs.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^1^|^01:27:42^|^24418435^|^24418435^|^^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.batch^|^24418435.batch^|^499092K^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.extern^|^24418435.extern^|^1376K^|^1^|^1Gn^|^" - "COMPLETED^|^00:00:00\n" - "^|^1^|^21:14:48^|^25569410^|^25569410^|^^|^1^|^4000Mc^|^RUNNING^|^19:28:36\n" - "^|^1^|^21:14:49^|^25569410.extern^|^25569410.extern^|^1548K^|^1^|^4000Mc^|^" - "RUNNING^|^00:00:00\n" - "^|^1^|^21:14:43^|^25569410.0^|^25569410.0^|^62328K" - "^|^1^|^4000Mc^|^RUNNING^|^19:28:36\n" - ) + sub_result.stdout = console_jobs["24418435_notime"] + console_jobs[ + "25569410_notime" + ].replace("COMPLETED", "RUNNING") mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke( console.main, @@ -598,25 +471,15 @@ def test_invalid_not_state(mocker, mock_inquirer): assert output[5].split() == [] -def test_no_state(mocker, mock_inquirer): +def test_no_state(mocker, mock_inquirer, console_jobs): """Unknown states produce empty output.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^1^|^01:27:42^|^24418435^|^24418435^|^^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.batch^|^24418435.batch^|^499092K^|^1^|^1Gn^|^" - "COMPLETED^|^01:27:29\n" - "^|^1^|^01:27:42^|^24418435.extern^|^24418435.extern^|^1376K^|^1^|^1Gn^|^" - "COMPLETED^|^00:00:00\n" - "^|^1^|^21:14:48^|^25569410^|^25569410^|^^|^1^|^4000Mc^|^RUNNING^|^19:28:36\n" - "^|^1^|^21:14:49^|^25569410.extern^|^25569410.extern" - "^|^1548K^|^1^|^4000Mc^|^RUNNING^|^00:00:00\n" - "^|^1^|^21:14:43^|^25569410.0^|^25569410.0^|^62328K" - "^|^1^|^4000Mc^|^RUNNING^|^19:28:36\n" - ) + sub_result.stdout = console_jobs["24418435_notime"] + console_jobs[ + "25569410_notime" + ].replace("COMPLETED", "RUNNING") mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke( console.main, "--no-color --state ZZ 25569410 
24418435".split() @@ -638,20 +501,13 @@ def test_no_state(mocker, mock_inquirer): assert output[3] == "" -def test_array_job_raw_id(mocker, mock_inquirer): +def test_array_job_raw_id(mocker, mock_inquirer, console_jobs): """Can find job array by base id.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^1^|^00:09:34^|^24220929_421^|^24221219^|^^|^1^|^16000Mn^|^" - "COMPLETED^|^09:28.052\n" - "^|^1^|^00:09:34^|^24220929_421.batch^|^24221219.batch" - "^|^5664932K^|^1^|^16000Mn^|^COMPLETED^|^09:28.051\n" - "^|^1^|^00:09:34^|^24220929_421.extern^|^24221219.extern" - "^|^1404K^|^1^|^16000Mn^|^COMPLETED^|^00:00:00\n" - ) + sub_result.stdout = console_jobs["24221219"] mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke( console.main, @@ -671,26 +527,13 @@ def test_array_job_raw_id(mocker, mock_inquirer): assert len(output) == 1 -def test_array_job_single(mocker, mock_inquirer): +def test_array_job_single(mocker, mock_inquirer, console_jobs): """Can get single array job element.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^1^|^00:09:34^|^24220929_421^|^24221219^|^^|^1^|^16000Mn^|^" - "COMPLETED^|^09:28.052\n" - "^|^1^|^00:09:34^|^24220929_421.batch^|^24221219.batch" - "^|^5664932K^|^1^|^16000Mn^|^COMPLETED^|^09:28.051\n" - "^|^1^|^00:09:34^|^24220929_421.extern^|^24221219.extern" - "^|^1404K^|^1^|^16000Mn^|^COMPLETED^|^00:00:00\n" - "^|^1^|^00:09:33^|^24220929_431^|^24221220^|^^|^1^|^16000Mn^|^" - "PENDING^|^09:27.460\n" - "^|^1^|^00:09:33^|^24220929_431.batch^|^24221220.batch" - "^|^5518572K^|^1^|^16000Mn^|^PENDING^|^09:27.459\n" - "^|^1^|^00:09:33^|^24220929_431.extern^|^24221220.extern" - "^|^1400K^|^1^|^16000Mn^|^PENDING^|^00:00:00\n" - ) + sub_result.stdout = console_jobs["24221219"] + console_jobs["24221220"] mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke( console.main, @@ -712,26 +555,13 @@ def test_array_job_single(mocker, mock_inquirer): assert len(output) == 1 -def test_array_job_base(mocker, mock_inquirer): +def test_array_job_base(mocker, mock_inquirer, console_jobs): """Base array job id gets all elements.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^1^|^00:09:34^|^24220929_421^|^24221219^|^^|^1^|^16000Mn^|^" - "COMPLETED^|^09:28.052\n" - "^|^1^|^00:09:34^|^24220929_421.batch^|^24221219.batch^|^" - "5664932K^|^1^|^16000Mn^|^COMPLETED^|^09:28.051\n" - "^|^1^|^00:09:34^|^24220929_421.extern^|^24221219.extern^|^" - "1404K^|^1^|^16000Mn^|^COMPLETED^|^00:00:00\n" - "^|^1^|^00:09:33^|^24220929_431^|^24221220^|^^|^1^|^16000Mn^|^" - "PENDING^|^09:27.460\n" - "^|^1^|^00:09:33^|^24220929_431.batch^|^24221220.batch^|^" - "5518572K^|^1^|^16000Mn^|^PENDING^|^09:27.459\n" - "^|^1^|^00:09:33^|^24220929_431.extern^|^24221220.extern^|^" - "1400K^|^1^|^16000Mn^|^PENDING^|^00:00:00\n" - ) + sub_result.stdout = console_jobs["24221219"] + console_jobs["24221220"] mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke( console.main, @@ -789,19 +619,13 @@ def test_empty_sacct(mocker, mock_inquirer): assert len(output) == 1 -def test_failed_no_mem(mocker, mock_inquirer): +def 
test_failed_no_mem(mocker, mock_inquirer, console_jobs): """Empty memory entries produce valid output.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^8^|^00:00:12^|^23000381^|^23000381^|^^|^1^|^4000Mc^|^FAILED^|^00:00:00\n" - "^|^8^|^00:00:12^|^23000381.batch^|^23000381.batch^|^^|^1^|^4000Mc^|^" - "FAILED^|^00:00:00\n" - "^|^8^|^00:00:12^|^23000381.extern^|^23000381.extern^|^1592K^|^1^|^4000Mc^|^" - "COMPLETED^|^00:00:00\n" - ) + sub_result.stdout = console_jobs["23000381"] mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke(console.main, "--no-color 23000381".split()) @@ -812,16 +636,13 @@ def test_failed_no_mem(mocker, mock_inquirer): assert len(output) == 1 -def test_canceled_by_other(mocker, mock_inquirer): +def test_canceled_by_other(mocker, mock_inquirer, console_jobs): """Canceled states are correctly handled.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^16^|^00:00:00^|^23000233^|^23000233^|^^|^1^|^" - "4000Mc^|^CANCELLED by 129319^|^00:00:00\n" - ) + sub_result.stdout = console_jobs["23000233"] mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke(console.main, "--no-color 23000233 --state CA".split()) @@ -832,27 +653,20 @@ def test_canceled_by_other(mocker, mock_inquirer): "23000233", "CANCELLED", "00:00:00", - "---", + "0.0%", "---", "0.0%", ] assert len(output) == 1 -def test_zero_runtime(mocker, mock_inquirer): +def test_zero_runtime(mocker, mock_inquirer, console_jobs): """Entries with zero runtime produce reasonable timeeff.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 - sub_result.stdout = ( - "^|^8^|^00:00:00^|^23000210^|^23000210^|^^|^1^|^20000Mn^|^" - "FAILED^|^00:00.007\n" - "^|^8^|^00:00:00^|^23000210.batch^|^23000210.batch^|^1988K^|^1^|^20000Mn^|^" - "FAILED^|^00:00.006\n" - "^|^8^|^00:00:00^|^23000210.extern^|^23000210.extern^|^1556K^|^1^|^20000Mn^|^" - "COMPLETED^|^00:00:00\n" - ) + sub_result.stdout = console_jobs["23000210"] mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke(console.main, "--no-color 23000210".split()) @@ -875,56 +689,64 @@ def test_no_systems(mocker, mock_inquirer): assert output[0] == "No supported scheduling systems found!" 
-def test_issue_16(mocker, mock_inquirer): +def test_issue_16(mocker, mock_inquirer, console_jobs): """Incorrect memory usage for multi-node jobs.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 sub_result.stdout = """ -^|^16^|^07:36:03^|^65638294^|^65638294^|^^|^2^|^32G\ +^|^16^|^07:36:03^|^65638294^|^65638294^|^^|^1^|^2^|^32G\ ^|^COMPLETED^|^6-23:59:00^|^4-23:56:21 ^|^1^|^07:36:03^|^65638294.batch^|^65638294.batch\ -^|^1147220K^|^1^|^^|^COMPLETED^|^^|^07:30:20 +^|^1147220K^|^1^|^1^|^^|^COMPLETED^|^^|^07:30:20 ^|^16^|^07:36:03^|^65638294.extern^|^65638294.extern\ -^|^0^|^2^|^^|^COMPLETED^|^^|^00:00.001 -^|^15^|^00:00:11^|^65638294.0^|^65638294.0^|^0^|^1^|^^|^COMPLETED^|^^|^00:11.830 -^|^15^|^00:02:15^|^65638294.1^|^65638294.1^|^4455540K^|^1^|^^|^COMPLETED^|^^|^31:09.458 -^|^15^|^00:00:10^|^65638294.2^|^65638294.2^|^0^|^1^|^^|^COMPLETED^|^^|^00:00:04 -^|^15^|^00:00:08^|^65638294.3^|^65638294.3^|^0^|^1^|^^|^COMPLETED^|^^|^00:09.602 -^|^15^|^00:00:07^|^65638294.4^|^65638294.4^|^0^|^1^|^^|^COMPLETED^|^^|^00:56.827 -^|^15^|^00:00:06^|^65638294.5^|^65638294.5^|^0^|^1^|^^|^COMPLETED^|^^|^00:03.512 -^|^15^|^00:00:08^|^65638294.6^|^65638294.6^|^0^|^1^|^^|^COMPLETED^|^^|^00:08.520 -^|^15^|^00:00:13^|^65638294.7^|^65638294.7^|^0^|^1^|^^|^COMPLETED^|^^|^01:02.013 -^|^15^|^00:00:02^|^65638294.8^|^65638294.8^|^0^|^1^|^^|^COMPLETED^|^^|^00:03.639 -^|^15^|^00:00:06^|^65638294.9^|^65638294.9^|^0^|^1^|^^|^COMPLETED^|^^|^00:08.683 -^|^15^|^00:00:08^|^65638294.10^|^65638294.10^|^0^|^1^|^^|^COMPLETED^|^^|^00:57.438 -^|^15^|^00:00:06^|^65638294.11^|^65638294.11^|^0^|^1^|^^|^COMPLETED^|^^|^00:03.642 -^|^15^|^00:00:09^|^65638294.12^|^65638294.12^|^0^|^1^|^^|^COMPLETED^|^^|^00:10.271 +^|^0^|^1^|^2^|^^|^COMPLETED^|^^|^00:00.001 +^|^15^|^00:00:11^|^65638294.0^|^65638294.0^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:11.830 +^|^15^|^00:02:15^|^65638294.1^|^65638294.1^|^4455540K\ +^|^1^|^1^|^^|^COMPLETED^|^^|^31:09.458 +^|^15^|^00:00:10^|^65638294.2^|^65638294.2^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:00:04 +^|^15^|^00:00:08^|^65638294.3^|^65638294.3^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:09.602 +^|^15^|^00:00:07^|^65638294.4^|^65638294.4^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:56.827 +^|^15^|^00:00:06^|^65638294.5^|^65638294.5^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:03.512 +^|^15^|^00:00:08^|^65638294.6^|^65638294.6^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:08.520 +^|^15^|^00:00:13^|^65638294.7^|^65638294.7^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^01:02.013 +^|^15^|^00:00:02^|^65638294.8^|^65638294.8^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:03.639 +^|^15^|^00:00:06^|^65638294.9^|^65638294.9^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:08.683 +^|^15^|^00:00:08^|^65638294.10^|^65638294.10^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:57.438 +^|^15^|^00:00:06^|^65638294.11^|^65638294.11^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:03.642 +^|^15^|^00:00:09^|^65638294.12^|^65638294.12^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:10.271 ^|^15^|^00:01:24^|^65638294.13^|^65638294.13^|^4149700K\ -^|^1^|^^|^COMPLETED^|^^|^17:18.067 -^|^15^|^00:00:01^|^65638294.14^|^65638294.14^|^0^|^1^|^^|^COMPLETED^|^^|^00:03.302 -^|^15^|^00:00:10^|^65638294.15^|^65638294.15^|^0^|^1^|^^|^COMPLETED^|^^|^00:14.615 -^|^15^|^00:06:45^|^65638294.16^|^65638294.16^|^4748052K^|^1^|^^|^COMPLETED^|^^|^01:36:40 -^|^15^|^00:00:10^|^65638294.17^|^65638294.17^|^0^|^1^|^^|^COMPLETED^|^^|^00:03.864 -^|^15^|^00:00:09^|^65638294.18^|^65638294.18^|^0^|^1^|^^|^COMPLETED^|^^|^00:48.987 
-^|^15^|^01:32:53^|^65638294.19^|^65638294.19^|^7734356K^|^1^|^^|^COMPLETED^|^^|^23:09:33 -^|^15^|^00:00:01^|^65638294.20^|^65638294.20^|^0^|^1^|^^|^COMPLETED^|^^|^00:03.520 -^|^15^|^00:00:07^|^65638294.21^|^65638294.21^|^0^|^1^|^^|^COMPLETED^|^^|^00:50.015 -^|^15^|^00:55:17^|^65638294.22^|^65638294.22^|^8074500K^|^1^|^^|^COMPLETED^|^^|^13:45:29 -^|^15^|^00:00:13^|^65638294.23^|^65638294.23^|^0^|^1^|^^|^COMPLETED^|^^|^00:04.413 -^|^15^|^00:00:12^|^65638294.24^|^65638294.24^|^0^|^1^|^^|^COMPLETED^|^^|^00:49.100 -^|^15^|^00:57:41^|^65638294.25^|^65638294.25^|^7883152K^|^1^|^^|^COMPLETED^|^^|^14:20:36 -^|^15^|^00:00:01^|^65638294.26^|^65638294.26^|^0^|^1^|^^|^COMPLETED^|^^|^00:03.953 -^|^15^|^00:00:05^|^65638294.27^|^65638294.27^|^0^|^1^|^^|^COMPLETED^|^^|^00:47.223 -^|^15^|^01:00:17^|^65638294.28^|^65638294.28^|^7715752K^|^1^|^^|^COMPLETED^|^^|^14:59:40 -^|^15^|^00:00:06^|^65638294.29^|^65638294.29^|^0^|^1^|^^|^COMPLETED^|^^|^00:04.341 -^|^15^|^00:00:07^|^65638294.30^|^65638294.30^|^0^|^1^|^^|^COMPLETED^|^^|^00:50.416 -^|^15^|^01:22:31^|^65638294.31^|^65638294.31^|^7663264K^|^1^|^^|^COMPLETED^|^^|^20:33:59 -^|^15^|^00:00:05^|^65638294.32^|^65638294.32^|^0^|^1^|^^|^COMPLETED^|^^|^00:04.199 -^|^15^|^00:00:08^|^65638294.33^|^65638294.33^|^0^|^1^|^^|^COMPLETED^|^^|^00:50.009 -^|^15^|^01:32:23^|^65638294.34^|^65638294.34^|^7764884K^|^1^|^^|^COMPLETED^|^^|^23:01:52 -^|^15^|^00:00:06^|^65638294.35^|^65638294.35^|^0^|^1^|^^|^COMPLETED^|^^|^00:04.527 +^|^1^|^1^|^^|^COMPLETED^|^^|^17:18.067 +^|^15^|^00:00:01^|^65638294.14^|^65638294.14^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:03.302 +^|^15^|^00:00:10^|^65638294.15^|^65638294.15^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:14.615 +^|^15^|^00:06:45^|^65638294.16^|^65638294.16^|^4748052K\ +^|^1^|^1^|^^|^COMPLETED^|^^|^01:36:40 +^|^15^|^00:00:10^|^65638294.17^|^65638294.17^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:03.864 +^|^15^|^00:00:09^|^65638294.18^|^65638294.18^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:48.987 +^|^15^|^01:32:53^|^65638294.19^|^65638294.19^|^7734356K\ +^|^1^|^1^|^^|^COMPLETED^|^^|^23:09:33 +^|^15^|^00:00:01^|^65638294.20^|^65638294.20^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:03.520 +^|^15^|^00:00:07^|^65638294.21^|^65638294.21^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:50.015 +^|^15^|^00:55:17^|^65638294.22^|^65638294.22^|^8074500K\ +^|^1^|^1^|^^|^COMPLETED^|^^|^13:45:29 +^|^15^|^00:00:13^|^65638294.23^|^65638294.23^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:04.413 +^|^15^|^00:00:12^|^65638294.24^|^65638294.24^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:49.100 +^|^15^|^00:57:41^|^65638294.25^|^65638294.25^|^7883152K\ +^|^1^|^1^|^^|^COMPLETED^|^^|^14:20:36 +^|^15^|^00:00:01^|^65638294.26^|^65638294.26^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:03.953 +^|^15^|^00:00:05^|^65638294.27^|^65638294.27^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:47.223 +^|^15^|^01:00:17^|^65638294.28^|^65638294.28^|^7715752K\ +^|^1^|^1^|^^|^COMPLETED^|^^|^14:59:40 +^|^15^|^00:00:06^|^65638294.29^|^65638294.29^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:04.341 +^|^15^|^00:00:07^|^65638294.30^|^65638294.30^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:50.416 +^|^15^|^01:22:31^|^65638294.31^|^65638294.31^|^7663264K\ +^|^1^|^1^|^^|^COMPLETED^|^^|^20:33:59 +^|^15^|^00:00:05^|^65638294.32^|^65638294.32^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:04.199 +^|^15^|^00:00:08^|^65638294.33^|^65638294.33^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:50.009 +^|^15^|^01:32:23^|^65638294.34^|^65638294.34^|^7764884K\ +^|^1^|^1^|^^|^COMPLETED^|^^|^23:01:52 +^|^15^|^00:00:06^|^65638294.35^|^65638294.35^|^0^|^1^|^1^|^^|^COMPLETED^|^^|^00:04.527 """ 
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) result = runner.invoke(console.main, "--no-color 65638294".split()) @@ -943,36 +765,36 @@ def test_issue_16(mocker, mock_inquirer): assert len(output) == 1 -def test_energy_reporting(mocker, mock_inquirer): +def test_energy_reporting(mocker, mock_inquirer, console_jobs): """Include energy reporting with the `energy` format code.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner() sub_result = mocker.MagicMock() sub_result.returncode = 0 sub_result.stdout = ( - "^|^32^|^00:01:09^|^37403870_1^|^37403937^|^^|^1^|^32000M^|^" + "^|^32^|^00:01:09^|^37403870_1^|^37403937^|^^|^1^|^1^|^32000M^|^" "COMPLETED^|^^|^00:02:00^|^00:47.734\n" - "^|^32^|^00:01:09^|^37403870_1.batch^|^37403937.batch^|^6300K^|^1^|^^|^" + "^|^32^|^00:01:09^|^37403870_1.batch^|^37403937.batch^|^6300K^|^1^|^1^|^^|^" "COMPLETED^|^energy=33,fs/disk=0^|^^|^00:47.733\n" - "^|^32^|^00:01:09^|^37403870_1.extern^|^37403937.extern^|^4312K^|^1^|^^|^" + "^|^32^|^00:01:09^|^37403870_1.extern^|^37403937.extern^|^4312K^|^1^|^1^|^^|^" "COMPLETED^|^energy=33,fs/disk=0^|^^|^00:00.001\n" - "^|^32^|^00:01:21^|^37403870_2^|^37403938^|^^|^1^|^32000M^|^" + "^|^32^|^00:01:21^|^37403870_2^|^37403938^|^^|^1^|^1^|^32000M^|^" "COMPLETED^|^^|^00:02:00^|^00:41.211\n" - "^|^32^|^00:01:21^|^37403870_2.batch^|^37403938.batch^|^6316K^|^1^|^^|^" + "^|^32^|^00:01:21^|^37403870_2.batch^|^37403938.batch^|^6316K^|^1^|^1^|^^|^" "COMPLETED^|^energy=32,fs/disk=0^|^^|^00:41.210\n" - "^|^32^|^00:01:21^|^37403870_2.extern^|^37403938.extern^|^4312K^|^1^|^^|^" + "^|^32^|^00:01:21^|^37403870_2.extern^|^37403938.extern^|^4312K^|^1^|^1^|^^|^" "COMPLETED^|^energy=32,fs/disk=0^|^^|^00:00:00\n" - "^|^32^|^00:01:34^|^37403870_3^|^37403939^|^^|^1^|^32000M^|^" + "^|^32^|^00:01:34^|^37403870_3^|^37403939^|^^|^1^|^1^|^32000M^|^" "COMPLETED^|^^|^00:02:00^|^00:51.669\n" - "^|^32^|^00:01:34^|^37403870_3.batch^|^37403939.batch^|^6184K^|^1^|^^|^" + "^|^32^|^00:01:34^|^37403870_3.batch^|^37403939.batch^|^6184K^|^1^|^1^|^^|^" "COMPLETED^|^energy=30,fs/disk=0^|^^|^00:51.667\n" - "^|^32^|^00:01:35^|^37403870_3.extern^|^37403939.extern^|^4312K^|^1^|^^|^" + "^|^32^|^00:01:35^|^37403870_3.extern^|^37403939.extern^|^4312K^|^1^|^1^|^^|^" "COMPLETED^|^fs/disk=0,energy=30^|^^|^00:00.001\n" - "^|^32^|^00:01:11^|^37403870_4^|^37403870^|^^|^1^|^32000M^|^" + "^|^32^|^00:01:11^|^37403870_4^|^37403870^|^^|^1^|^1^|^32000M^|^" "COMPLETED^|^^|^00:02:00^|^01:38.184\n" - "^|^32^|^00:01:11^|^37403870_4.batch^|^37403870.batch^|^6300K^|^1^|^^|^" + "^|^32^|^00:01:11^|^37403870_4.batch^|^37403870.batch^|^6300K^|^1^|^1^|^^|^" "COMPLETED^|^fs/disk=0^|^^|^01:38.183\n" - "^|^32^|^00:01:11^|^37403870_4.extern^|^37403870.extern^|^4312K^|^1^|^^|^" + "^|^32^|^00:01:11^|^37403870_4.extern^|^37403870.extern^|^4312K^|^1^|^1^|^^|^" "COMPLETED^|^energy=27,fs/disk=0^|^^|^00:00.001\n" ) mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result) @@ -1028,7 +850,7 @@ def test_energy_reporting(mocker, mock_inquirer): assert len(output) == 5 -def test_extra_args(mocker, mock_inquirer): +def test_extra_args(mocker, mock_inquirer, console_jobs): """Can add extra arguments for sacct.""" mocker.patch("reportseff.console.which", return_value=True) runner = CliRunner()
Wrong memory efficiency when using "srun"

Hello, probably related to #37, but a bit different, so I thought I'd open a new issue. When I run:

`srun -n 8 stress -m 1 -t 52 --vm-keep --vm-bytes 1800M`

I use 8 CPUs and almost 16 GB, but `reportseff` gets the CPU efficiency right, while the memory efficiency is way off (it basically reports that I only used 1800M).

```
$ seff 131042
######################## JOB EFFICIENCY REPORT ########################
# Job ID: 131042
# State: COMPLETED (exit code 0)
# Cores: 8
# CPU Utilized: 00:06:58
# CPU Efficiency: 98.58% of 00:07:04 core-walltime
# Wall-clock time: 00:00:53
# Memory Utilized: 14.86 GB (estimated maximum)
#######################################################################

$ reportseff --debug 131042
^|^8^|^00:00:53^|^131042^|^131042^|^^|^1^|^16000M^|^COMPLETED^|^00:01:00^|^06:57.815
^|^8^|^00:00:53^|^131042.batch^|^131042.batch^|^20264K^|^1^|^^|^COMPLETED^|^^|^00:00.034
^|^8^|^00:00:53^|^131042.extern^|^131042.extern^|^1052K^|^1^|^^|^COMPLETED^|^^|^00:00.001
^|^8^|^00:00:53^|^131042.0^|^131042.0^|^1947276K^|^1^|^^|^COMPLETED^|^^|^06:57.779

  JobID      State    Elapsed  TimeEff  CPUEff  MemEff
 131042  COMPLETED   00:00:53    88.3%   98.3%   11.9%
```
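A quick arithmetic check against the numbers in the debug output above shows why scaling MaxRSS by NTasks (the `mem * tasks` change in this record's patch) fixes the report; the only assumption is that the requested 16000M is normalized to KiB:

```python
# Values read from the `reportseff --debug 131042` output above.
max_rss_kib = 1_947_276        # MaxRSS of step 131042.0, reported per task
n_tasks = 8                    # srun -n 8
req_mem_kib = 16_000 * 1024    # 16000M requested (assumed to mean MiB)

old_eff = 100 * max_rss_kib / req_mem_kib             # MaxRSS of one task only
new_eff = 100 * max_rss_kib * n_tasks / req_mem_kib   # scaled by NTasks
print(f"old: {old_eff:.1f}%, new: {new_eff:.1f}%")    # old: 11.9%, new: 95.1%
```

The old value reproduces the wrong 11.9% in the issue, the new value is the 95.1% asserted by the regression test, and the scaled total (15,578,208 KiB, about 14.86 GB) matches seff's "Memory Utilized" estimate.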
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_job.py::test_multinode_job_issue_41", "tests/test_output_renderer.py::test_renderer_init", "tests/test_output_renderer.py::test_renderer_correct_columns", "tests/test_reportseff.py::test_directory_input", "tests/test_reportseff.py::test_debug_option", "tests/test_reportseff.py::test_process_failure", "tests/test_reportseff.py::test_short_output", "tests/test_reportseff.py::test_long_output", "tests/test_reportseff.py::test_simple_job", "tests/test_reportseff.py::test_simple_user", "tests/test_reportseff.py::test_simple_partition", "tests/test_reportseff.py::test_since", "tests/test_reportseff.py::test_since_all_users", "tests/test_reportseff.py::test_since_all_users_partition", "tests/test_reportseff.py::test_parsable", "tests/test_reportseff.py::test_simple_state", "tests/test_reportseff.py::test_simple_not_state", "tests/test_reportseff.py::test_invalid_not_state", "tests/test_reportseff.py::test_array_job_raw_id", "tests/test_reportseff.py::test_array_job_single", "tests/test_reportseff.py::test_array_job_base", "tests/test_reportseff.py::test_failed_no_mem", "tests/test_reportseff.py::test_canceled_by_other", "tests/test_reportseff.py::test_zero_runtime", "tests/test_reportseff.py::test_issue_16", "tests/test_reportseff.py::test_energy_reporting" ]
[ "tests/test_job.py::test_eq", "tests/test_job.py::test_repr", "tests/test_job.py::test_job_init", "tests/test_job.py::test_update_main_job", "tests/test_job.py::test_update_main_job_unlimited", "tests/test_job.py::test_update_main_job_partition_limit", "tests/test_job.py::test_update_part_job", "tests/test_job.py::test_parse_bug", "tests/test_job.py::test_name", "tests/test_job.py::test_get_entry", "tests/test_job.py::test_parse_slurm_timedelta", "tests/test_job.py::test_parsemem_nodes", "tests/test_job.py::test_parsemem_cpus", "tests/test_job.py::test_parsememstep", "tests/test_job.py::test_unknown_admin_comment", "tests/test_job.py::test_single_core", "tests/test_job.py::test_multi_node", "tests/test_job.py::test_single_gpu", "tests/test_job.py::test_multi_gpu", "tests/test_job.py::test_multi_node_multi_gpu", "tests/test_job.py::test_short_job", "tests/test_job.py::test_bad_gpu", "tests/test_job.py::test_bad_gpu_utilization", "tests/test_job.py::test_issue_26", "tests/test_job.py::test_multinode_job", "tests/test_output_renderer.py::test_renderer_build_formatters", "tests/test_output_renderer.py::test_renderer_validate_formatters", "tests/test_output_renderer.py::test_renderer_validate_formatters_with_node", "tests/test_output_renderer.py::test_renderer_format_jobs", "tests/test_output_renderer.py::test_renderer_format_jobs_multi_node", "tests/test_output_renderer.py::test_renderer_format_jobs_multi_node_with_nodes", "tests/test_output_renderer.py::test_renderer_format_jobs_multi_node_with_nodes_and_gpu", "tests/test_output_renderer.py::test_format_jobs_empty", "tests/test_output_renderer.py::test_format_jobs_single_str", "tests/test_output_renderer.py::test_formatter_init", "tests/test_output_renderer.py::test_formatter_eq", "tests/test_output_renderer.py::test_formatter_validate_title", "tests/test_output_renderer.py::test_formatter_compute_width", "tests/test_output_renderer.py::test_formatter_format_entry", "tests/test_reportseff.py::test_directory_input_exception", "tests/test_reportseff.py::test_format_add", "tests/test_reportseff.py::test_no_state", "tests/test_reportseff.py::test_sacct_error", "tests/test_reportseff.py::test_empty_sacct", "tests/test_reportseff.py::test_no_systems", "tests/test_reportseff.py::test_extra_args" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-09-14T14:32:03Z"
mit
tryolabs__norfair-198
diff --git a/norfair/tracker.py b/norfair/tracker.py index 1a26b07..0a80b97 100644 --- a/norfair/tracker.py +++ b/norfair/tracker.py @@ -438,18 +438,14 @@ class TrackedObject: reid_hit_counter_max: Optional[int], abs_to_rel: Callable[[np.array], np.array], ): - try: - initial_detection_points = validate_points( - initial_detection.absolute_points - ) - except AttributeError: + if not isinstance(initial_detection, Detection): print( f"\n[red]ERROR[/red]: The detection list fed into `tracker.update()` should be composed of {Detection} objects not {type(initial_detection)}.\n" ) exit() - self.dim_points = initial_detection_points.shape[1] - self.num_points = initial_detection_points.shape[0] + self.dim_points = initial_detection.absolute_points.shape[1] + self.num_points = initial_detection.absolute_points.shape[0] self.hit_counter_max: int = hit_counter_max self.pointwise_hit_counter_max: int = pointwise_hit_counter_max self.initialization_delay = initialization_delay @@ -487,7 +483,7 @@ class TrackedObject: self.past_detections: Sequence["Detection"] = [] # Create Kalman Filter - self.filter = filter_factory.create_filter(initial_detection_points) + self.filter = filter_factory.create_filter(initial_detection.absolute_points) self.dim_z = self.dim_points * self.num_points self.label = initial_detection.label self.abs_to_rel = abs_to_rel @@ -550,7 +546,6 @@ class TrackedObject: return self.point_hit_counter > 0 def hit(self, detection: "Detection", period: int = 1): - points = validate_points(detection.absolute_points) self._conditionally_add_to_past_detections(detection) self.last_detection = detection @@ -580,7 +575,9 @@ class TrackedObject: self.point_hit_counter[self.point_hit_counter < 0] = 0 H_vel = np.zeros(H_pos.shape) # But we don't directly measure velocity H = np.hstack([H_pos, H_vel]) - self.filter.update(np.expand_dims(points.flatten(), 0).T, None, H) + self.filter.update( + np.expand_dims(detection.absolute_points.flatten(), 0).T, None, H + ) # Force points being detected for the first time to have velocity = 0 # This is needed because some detectors (like OpenPose) set points with @@ -600,7 +597,7 @@ class TrackedObject: ) self.filter.x[: self.dim_z][first_detection_mask] = np.expand_dims( - points.flatten(), 0 + detection.absolute_points.flatten(), 0 ).T[first_detection_mask] self.filter.x[self.dim_z :][np.logical_not(detected_at_least_once_mask)] = 0 @@ -690,9 +687,9 @@ class Detection: label: Hashable = None, embedding=None, ): - self.points = points + self.points = validate_points(points) self.scores = scores self.data = data self.label = label - self.absolute_points = points.copy() + self.absolute_points = self.points.copy() self.embedding = embedding
tryolabs/norfair
062c4330bd75ec3632ff88a97a516a665a718ad2
diff --git a/tests/conftest.py b/tests/conftest.py index e4162cd..45c5526 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,6 +1,8 @@ import numpy as np import pytest +from norfair.utils import validate_points + @pytest.fixture def mock_det(): @@ -30,3 +32,23 @@ def mock_obj(mock_det): self.last_detection = mock_det(points, scores=scores) return FakeTrackedObject + + [email protected] +def mock_coordinate_transformation(): + + # simple mock to return abs or relative positions + class TransformMock: + def __init__(self, relative_points, absolute_points) -> None: + self.absolute_points = validate_points(absolute_points) + self.relative_points = validate_points(relative_points) + + def abs_to_rel(self, points): + np.testing.assert_equal(points, self.absolute_points) + return self.relative_points + + def rel_to_abs(self, points): + np.testing.assert_equal(points, self.relative_points) + return self.absolute_points + + return TransformMock diff --git a/tests/test_tracker.py b/tests/test_tracker.py index ffdeb45..bb6da11 100644 --- a/tests/test_tracker.py +++ b/tests/test_tracker.py @@ -9,6 +9,7 @@ from norfair import ( OptimizedKalmanFilterFactory, Tracker, ) +from norfair.utils import validate_points def test_params(): @@ -154,6 +155,68 @@ def test_distance_t(filter_factory): assert 4 < tracked_objects[0].estimate[0][1] <= 4.5 [email protected]( + "filter_factory", [FilterPyKalmanFilterFactory(), OptimizedKalmanFilterFactory()] +) +def test_1d_points(filter_factory, mock_coordinate_transformation): + # + # Test a detection with rank 1 + # + tracker = Tracker( + "frobenius", + initialization_delay=0, + distance_threshold=1, + filter_factory=filter_factory, + ) + detection = Detection(points=np.array([1, 1])) + assert detection.points.shape == (1, 2) + assert detection.absolute_points.shape == (1, 2) + tracked_objects = tracker.update([detection]) + assert len(tracked_objects) == 1 + tracked_object = tracked_objects[0] + assert tracked_object.estimate.shape == (1, 2) + + +def test_camera_motion(mock_coordinate_transformation): + # + # Simple test for camera motion + # + for one_d in [True, False]: + tracker = Tracker("frobenius", 1, initialization_delay=0) + if one_d: + absolute_points = np.array([1, 1]) + else: + absolute_points = np.array([[1, 1]]) + + relative_points = absolute_points + 1 + + coord_transformation_mock = mock_coordinate_transformation( + relative_points=relative_points, absolute_points=absolute_points + ) + + detection = Detection(relative_points) + tracked_objects = tracker.update( + [detection], coord_transformations=coord_transformation_mock + ) + + # assert that the detection was correctly updated + np.testing.assert_equal( + detection.absolute_points, validate_points(absolute_points) + ) + np.testing.assert_equal(detection.points, validate_points(relative_points)) + + # check the tracked_object + assert len(tracked_objects) == 1 + obj = tracked_objects[0] + np.testing.assert_almost_equal( + obj.get_estimate(absolute=False), validate_points(relative_points) + ) + np.testing.assert_almost_equal( + obj.get_estimate(absolute=True), validate_points(absolute_points) + ) + np.testing.assert_almost_equal(obj.estimate, validate_points(relative_points)) + + # TODO tests list: # - detections with different labels # - partial matches where some points are missing
Absolute paths drawing with centroids

We only allow drawing the absolute paths of bounding boxes. To draw an absolute path with centroid points I need to do a trick that simulates a bounding box by duplicating the centroid coordinate. Example:

```
centroid = np.array(
    [
        [detection_as_xywh[0].item(), detection_as_xywh[1].item()],
        [detection_as_xywh[0].item(), detection_as_xywh[1].item()],
    ]
)
```
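With the patch above, `validate_points` runs inside `Detection.__init__`, so a rank-1 centroid is promoted to shape (1, 2) and the duplicated-row workaround becomes unnecessary. A minimal sketch mirroring the new `test_1d_points` test; the coordinate values are arbitrary:

```python
import numpy as np

from norfair import Detection, Tracker

tracker = Tracker("frobenius", distance_threshold=1, initialization_delay=0)

# A plain centroid; Detection reshapes it to (1, 2) on construction.
detection = Detection(points=np.array([100, 200]))
assert detection.points.shape == (1, 2)

tracked_objects = tracker.update([detection])
assert len(tracked_objects) == 1
assert tracked_objects[0].estimate.shape == (1, 2)
```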
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_tracker.py::test_1d_points[filter_factory0]", "tests/test_tracker.py::test_1d_points[filter_factory1]", "tests/test_tracker.py::test_camera_motion" ]
[ "tests/test_tracker.py::test_params", "tests/test_tracker.py::test_simple[filter_factory0]", "tests/test_tracker.py::test_simple[filter_factory1]", "tests/test_tracker.py::test_moving[filter_factory0]", "tests/test_tracker.py::test_moving[filter_factory1]", "tests/test_tracker.py::test_distance_t[filter_factory0]", "tests/test_tracker.py::test_distance_t[filter_factory1]" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2022-09-29T18:25:28Z"
bsd-3-clause
tsdat__tsdat-23
diff --git a/tsdat/pipeline/ingest_pipeline.py b/tsdat/pipeline/ingest_pipeline.py index f5dea5b..ef439d5 100644 --- a/tsdat/pipeline/ingest_pipeline.py +++ b/tsdat/pipeline/ingest_pipeline.py @@ -13,7 +13,7 @@ class IngestPipeline(Pipeline): applying quality checks and quality controls, and by saving the now-processed data in a standard file format.""" - def run(self, filepath: Union[str, List[str]]) -> None: + def run(self, filepath: Union[str, List[str]]) -> xr.Dataset: """Runs the IngestPipeline from start to finish. :param filepath: @@ -48,6 +48,8 @@ class IngestPipeline(Pipeline): # Hook to generate custom plots self.hook_generate_and_persist_plots(dataset) + return dataset + def hook_customize_dataset( self, dataset: xr.Dataset, raw_mapping: Dict[str, xr.Dataset] ) -> xr.Dataset:
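With `run` now returning the processed `xr.Dataset`, callers can assert on the pipeline output directly instead of re-reading the stored file, which is how the reworked `execute_test` helper in the test patch below uses it. A hedged sketch; the config and file paths are placeholders, and in practice a concrete `IngestPipeline` subclass would be used:

```python
import xarray as xr

from tsdat.pipeline.ingest_pipeline import IngestPipeline

# Placeholder config and data paths for illustration only.
pipeline = IngestPipeline("pipeline_config.yml", "storage_config.yml")
dataset = pipeline.run("input_data.csv")       # run() now returns the dataset

expected = xr.open_dataset("expected_output.nc")
xr.testing.assert_allclose(dataset, expected)  # compare in memory, no re-read
```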
tsdat/tsdat
b5e1b0c6c7c94de86175b2b18f6b7fbc2c33cac8
diff --git a/tests/test_examples.py b/tests/test_examples.py index e7c2a58..ff16cd0 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -67,25 +67,21 @@ def pipeline_produced_expected_directory_tree(pipeline: IngestPipeline) -> bool: return True -def pipeline_produced_expected_data( - pipeline: IngestPipeline, expected_data_file: str -) -> bool: - filename = os.path.basename(expected_data_file) - - # Retrieve the output data file - loc_id = pipeline.config.pipeline_definition.location_id - datastream = DSUtil.get_datastream_name(config=pipeline.config) - root: str = pipeline.storage._root - output_file = os.path.join(root, loc_id, datastream, filename) - - # Assert that the basename of the processed file and expected file match - assert os.path.isfile(output_file) - - # Compare data and optionally attributes to ensure everything matches. - ds_out: xr.Dataset = xr.open_dataset(output_file) - ds_exp: xr.Dataset = xr.open_dataset(expected_data_file) - - return ds_out.equals(ds_exp) +def execute_test( + storage_config: str, + pipeline_config: str, + pipeline: IngestPipeline, + input_filepath: str, + expected_filepath: str, +): + delete_existing_outputs(storage_config) + add_pipeline_module_to_path(storage_config) + + _pipeline = pipeline(pipeline_config, storage_config) + ds = _pipeline.run(input_filepath) + expected_ds = xr.open_dataset(expected_filepath) + xr.testing.assert_allclose(ds, expected_ds) + assert pipeline_produced_expected_directory_tree(_pipeline) def test_a2e_buoy_ingest_example(): @@ -98,23 +94,20 @@ def test_a2e_buoy_ingest_example(): STORAGE_CONFIG, ) - delete_existing_outputs(STORAGE_CONFIG) - - add_pipeline_module_to_path(STORAGE_CONFIG) - - humboldt_pipeline = BuoyIngestPipeline(HUMBOLDT_CONFIG, STORAGE_CONFIG) - morro_pipeline = BuoyIngestPipeline(MORRO_CONFIG, STORAGE_CONFIG) - - humboldt_pipeline.run(HUMBOLDT_FILE) - morro_pipeline.run(MORRO_FILE) - - assert pipeline_produced_expected_directory_tree(humboldt_pipeline) - assert pipeline_produced_expected_directory_tree(morro_pipeline) - - assert pipeline_produced_expected_data( - humboldt_pipeline, EXPECTED_HUMBOLDT_BUOY_FILE + execute_test( + storage_config=STORAGE_CONFIG, + pipeline_config=HUMBOLDT_CONFIG, + pipeline=BuoyIngestPipeline, + input_filepath=HUMBOLDT_FILE, + expected_filepath=EXPECTED_HUMBOLDT_BUOY_FILE, + ) + execute_test( + storage_config=STORAGE_CONFIG, + pipeline_config=MORRO_CONFIG, + pipeline=BuoyIngestPipeline, + input_filepath=MORRO_FILE, + expected_filepath=EXPECTED_MORRO_BUOY_FILE, ) - assert pipeline_produced_expected_data(morro_pipeline, EXPECTED_MORRO_BUOY_FILE) def test_a2e_imu_ingest_example(): @@ -127,23 +120,20 @@ def test_a2e_imu_ingest_example(): STORAGE_CONFIG, ) - delete_existing_outputs(STORAGE_CONFIG) - - add_pipeline_module_to_path(STORAGE_CONFIG) - - humboldt_pipeline = ImuIngestPipeline(HUMBOLDT_CONFIG, STORAGE_CONFIG) - morro_pipeline = ImuIngestPipeline(MORRO_CONFIG, STORAGE_CONFIG) - - humboldt_pipeline.run(HUMBOLDT_FILE) - morro_pipeline.run(MORRO_FILE) - - assert pipeline_produced_expected_directory_tree(humboldt_pipeline) - assert pipeline_produced_expected_directory_tree(morro_pipeline) - - assert pipeline_produced_expected_data( - humboldt_pipeline, EXPECTED_HUMBOLDT_IMU_FILE + execute_test( + storage_config=STORAGE_CONFIG, + pipeline_config=HUMBOLDT_CONFIG, + pipeline=ImuIngestPipeline, + input_filepath=HUMBOLDT_FILE, + expected_filepath=EXPECTED_HUMBOLDT_IMU_FILE, + ) + execute_test( + storage_config=STORAGE_CONFIG, + pipeline_config=MORRO_CONFIG, + pipeline=ImuIngestPipeline, + input_filepath=MORRO_FILE, + expected_filepath=EXPECTED_MORRO_IMU_FILE, ) - assert pipeline_produced_expected_data(morro_pipeline, EXPECTED_MORRO_IMU_FILE) def test_a2e_lidar_ingest_example(): @@ -156,23 +146,20 @@ def test_a2e_lidar_ingest_example(): STORAGE_CONFIG, ) - delete_existing_outputs(STORAGE_CONFIG) - - add_pipeline_module_to_path(STORAGE_CONFIG) - - humboldt_pipeline = LidarIngestPipeline(HUMBOLDT_CONFIG, STORAGE_CONFIG) - morro_pipeline = LidarIngestPipeline(MORRO_CONFIG, STORAGE_CONFIG) - - humboldt_pipeline.run(HUMBOLDT_FILE) - morro_pipeline.run(MORRO_FILE) - - assert pipeline_produced_expected_directory_tree(humboldt_pipeline) - assert pipeline_produced_expected_directory_tree(morro_pipeline) - - assert pipeline_produced_expected_data( - humboldt_pipeline, EXPECTED_HUMBOLDT_LIDAR_FILE + execute_test( + storage_config=STORAGE_CONFIG, + pipeline_config=HUMBOLDT_CONFIG, + pipeline=LidarIngestPipeline, + input_filepath=HUMBOLDT_FILE, + expected_filepath=EXPECTED_HUMBOLDT_LIDAR_FILE, + ) + execute_test( + storage_config=STORAGE_CONFIG, + pipeline_config=MORRO_CONFIG, + pipeline=LidarIngestPipeline, + input_filepath=MORRO_FILE, + expected_filepath=EXPECTED_MORRO_LIDAR_FILE, ) - assert pipeline_produced_expected_data(morro_pipeline, EXPECTED_MORRO_LIDAR_FILE) def test_a2e_waves_ingest_example(): @@ -185,20 +172,17 @@ def test_a2e_waves_ingest_example(): STORAGE_CONFIG, ) - delete_existing_outputs(STORAGE_CONFIG) - - add_pipeline_module_to_path(STORAGE_CONFIG) - - humboldt_pipeline = WaveIngestPipeline(HUMBOLDT_CONFIG, STORAGE_CONFIG) - morro_pipeline = WaveIngestPipeline(MORRO_CONFIG, STORAGE_CONFIG) - - humboldt_pipeline.run(HUMBOLDT_FILE) - morro_pipeline.run(MORRO_FILE) - - assert pipeline_produced_expected_directory_tree(humboldt_pipeline) - assert pipeline_produced_expected_directory_tree(morro_pipeline) - - assert pipeline_produced_expected_data( - humboldt_pipeline, EXPECTED_HUMBOLDT_WAVES_FILE + execute_test( + storage_config=STORAGE_CONFIG, + pipeline_config=HUMBOLDT_CONFIG, + pipeline=WaveIngestPipeline, + input_filepath=HUMBOLDT_FILE, + expected_filepath=EXPECTED_HUMBOLDT_WAVES_FILE, + ) + execute_test( + storage_config=STORAGE_CONFIG, + pipeline_config=MORRO_CONFIG, + pipeline=WaveIngestPipeline, + input_filepath=MORRO_FILE, + expected_filepath=EXPECTED_MORRO_WAVES_FILE, ) - assert pipeline_produced_expected_data(morro_pipeline, EXPECTED_MORRO_WAVES_FILE)
`IngestPipeline.run(*)` should return the processed `xarray.Dataset` object
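A minimal usage sketch of the requested behavior, consistent with the patch and tests above. The import path is taken from the patched module; the config and input file paths are hypothetical placeholders, and a real ingest would normally use a concrete `IngestPipeline` subclass with its own hooks.

```python
import xarray as xr

from tsdat.pipeline.ingest_pipeline import IngestPipeline

# Hypothetical config files, following the (pipeline_config, storage_config)
# constructor signature used in the example tests.
pipeline = IngestPipeline("pipeline_config.yml", "storage_config.yml")

# After this change, run() persists the output as before *and* returns the
# processed dataset, so callers can inspect it without re-reading files.
ds = pipeline.run("path/to/input_file.csv")  # hypothetical input file
assert isinstance(ds, xr.Dataset)
```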
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_examples.py::test_a2e_buoy_ingest_example", "tests/test_examples.py::test_a2e_imu_ingest_example", "tests/test_examples.py::test_a2e_lidar_ingest_example", "tests/test_examples.py::test_a2e_waves_ingest_example" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2021-11-22T23:29:48Z"
bsd-2-clause
tsifrer__python-twitch-client-49
diff --git a/twitch/api/search.py b/twitch/api/search.py index 98ae2e9..415e8e3 100644 --- a/twitch/api/search.py +++ b/twitch/api/search.py @@ -16,7 +16,7 @@ class Search(TwitchAPI): 'offset': offset } response = self._request_get('search/channels', params=params) - return [Channel.construct_from(x) for x in response['channels']] + return [Channel.construct_from(x) for x in response['channels'] or []] def games(self, query, live=False): params = { @@ -24,7 +24,7 @@ class Search(TwitchAPI): 'live': live, } response = self._request_get('search/games', params=params) - return [Game.construct_from(x) for x in response['games']] + return [Game.construct_from(x) for x in response['games'] or []] def streams(self, query, limit=25, offset=0, hls=None): if limit > 100: @@ -38,4 +38,4 @@ class Search(TwitchAPI): 'hls': hls } response = self._request_get('search/streams', params=params) - return [Stream.construct_from(x) for x in response['streams']] + return [Stream.construct_from(x) for x in response['streams'] or []]
tsifrer/python-twitch-client
120d8c3fb2c31d035a166062e05606e1d5ec69c4
diff --git a/tests/api/test_search.py b/tests/api/test_search.py index 9869ea3..3789831 100644 --- a/tests/api/test_search.py +++ b/tests/api/test_search.py @@ -62,6 +62,23 @@ def test_channels_raises_if_wrong_params_are_passed_in(param, value): client.search.channels('mah query', **kwargs) [email protected] +def test_channels_does_not_raise_if_no_channels_were_found(): + response = {'channels': None} + responses.add(responses.GET, + '{}search/channels'.format(BASE_URL), + body=json.dumps(response), + status=200, + content_type='application/json') + + client = TwitchClient('client id') + + channels = client.search.channels('mah bad query') + + assert len(responses.calls) == 1 + assert len(channels) == 0 + + @responses.activate def test_games(): response = { @@ -86,6 +103,23 @@ def test_games(): assert game.name == example_game['name'] [email protected] +def test_games_does_not_raise_if_no_games_were_found(): + response = {'games': None} + responses.add(responses.GET, + '{}search/games'.format(BASE_URL), + body=json.dumps(response), + status=200, + content_type='application/json') + + client = TwitchClient('client id') + + games = client.search.games('mah bad query') + + assert len(responses.calls) == 1 + assert len(games) == 0 + + @responses.activate def test_streams(): response = { @@ -123,3 +157,20 @@ def test_streams_raises_if_wrong_params_are_passed_in(param, value): kwargs = {param: value} with pytest.raises(TwitchAttributeException): client.search.streams('mah query', **kwargs) + + [email protected] +def test_streams_does_not_raise_if_no_streams_were_found(): + response = {'streams': None} + responses.add(responses.GET, + '{}search/streams'.format(BASE_URL), + body=json.dumps(response), + status=200, + content_type='application/json') + + client = TwitchClient('client id') + + streams = client.search.streams('mah bad query') + + assert len(responses.calls) == 1 + assert len(streams) == 0
TypeError upon empty games search results. ## Description Upon searching for a game using the `games()` function in `twitch.api.search.Search`, if the query yields no results, a `TypeError` is thrown. ## Expected Behavior `games()` should return an empty list, like the rest of the Search functions, when no results are returned from Twitch. ## Actual Behavior Since the search function tries to perform a list comprehension over `response['games']`, which is `None` when there are no matches, a `TypeError` is raised and the program is terminated. ## Possible Fix Do a quick sanity check on the datatype of `response` from `self._request_get()` in `games()` before trying to parse it. ## Steps to Reproduce Create a client instance and search for a game title that doesn't exist. ```python from twitch import TwitchClient client = TwitchClient('<my client id>') client.search.games('This is not a valid game title') ``` This should cause a `TypeError` to be raised with the following output: ``` Traceback (most recent call last): File "crash_test.py", line 3, in <module> client.search.games('This is not a valid game title') File "C:\Python36\lib\site-packages\twitch\api\search.py", line 27, in games return [Game.construct_from(x) for x in response['games']] TypeError: 'NoneType' object is not iterable ``` ## Context I was doing some exploratory research into making a small script to sync a private streaming community's live stream metadata (game title, stream title, etc.) to their Twitch restream. ## My Environment * Windows 10 * Python 3.6.4 * python-twitch-client 0.5.1 installed via pip3
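The merged patch above takes a lighter route than an explicit type check: each list comprehension iterates over `response[...] or []`, so a `None` payload falls back to an empty list. A minimal standalone illustration of that idiom; `parse_games` is a hypothetical stand-in for the parsing step inside `Search.games()`.

```python
def parse_games(response):
    """Return [] instead of raising when the API reports no matches."""
    # Twitch returns {"games": None} for an empty result set, and iterating
    # over None raises TypeError; `or []` substitutes an empty list.
    return [game for game in response["games"] or []]


assert parse_games({"games": None}) == []
assert parse_games({"games": [{"name": "Dota 2"}]}) == [{"name": "Dota 2"}]
```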
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/api/test_search.py::test_channels_does_not_raise_if_no_channels_were_found", "tests/api/test_search.py::test_games_does_not_raise_if_no_games_were_found", "tests/api/test_search.py::test_streams_does_not_raise_if_no_streams_were_found" ]
[ "tests/api/test_search.py::test_streams_raises_if_wrong_params_are_passed_in[limit-101]", "tests/api/test_search.py::test_streams", "tests/api/test_search.py::test_channels_raises_if_wrong_params_are_passed_in[limit-101]", "tests/api/test_search.py::test_channels", "tests/api/test_search.py::test_games" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2018-10-10T14:23:18Z"
mit