Eiffel: multiline string formatting - eiffel

I haven't found a better way to format a multiline string than the following; it seems complicated. What would be the best way to format this kind of code?
l_qry := "SELECT%
% * %
%FROM %
% enumerate %
%WHERE %
% " + {like item_prototype}.Primary_key_db_column_name + " = " + l_category_id + " %
%UNION %
% SELECT %
% e.* %
% FROM %
% enumerate e %
%INNER JOIN %
% enumerates_leaves s ON s." + {like item_prototype}.Primary_key_db_column_name + " = e." + {like item_prototype}.Category_db_column_name + " %
%) SELECT * FROM enumerates_leaves WHERE enumerates_leaves." + {like item_prototype}.Category_db_column_name + " IS NOT NULL;"

You can use verbatim strings for that purpose.
sql_select_country : STRING = "[
SELECT *
from COUNTRY
]"
-- Select all countries.
For building SQL queries dynamically, you can define a template
with placeholders and then replace them with the expected values.
template_query : STRING = "[
SELECT
*
FROM
enumerate
WHERE
$Primary_key_db_column_name = :id
UNION
SELECT
e.*
FROM
enumerate e
INNER JOIN
enumerates_leaves s ON s.$Primary_key_db_column_name = e.$Category_db_column_name
) SELECT * FROM enumerates_leaves WHERE enumerates_leaves.$Category_db_column_name IS NOT NULL;
]"
-- Template query with `$' placeholders for the column names.
Using the template:
l_query: STRING
create l_query.make_from_string (template_query)
l_query.replace_substring_all ("$Primary_key_db_column_name", {like item_prototype}.Primary_key_db_column_name)
...
In fact, one can generalize this idea and build something like:
sql_query_builder (query_template: READABLE_STRING_GENERAL; arguments: ITERABLE [READABLE_STRING_GENERAL]): STRING
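A minimal sketch of such a builder (here the argument list is adapted to explicit placeholder/value pairs; the TUPLE-based signature and routine name are an illustrative design, not an existing library feature):
sql_query_builder (query_template: READABLE_STRING_GENERAL; arguments: ITERABLE [TUPLE [placeholder, value: READABLE_STRING_GENERAL]]): STRING
        -- Copy of `query_template' with every `placeholder' replaced by its `value'.
    do
        create Result.make_from_string (query_template.to_string_8)
        across arguments as ic loop
            Result.replace_substring_all (ic.item.placeholder.to_string_8, ic.item.value.to_string_8)
        end
    end
Each pair is applied with `replace_substring_all', just like the manual substitution shown above.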

Related

exceeded the 80 characters length limit and was truncated in flink job

I am doing a join in Flink and I am getting "exceeded the 80 characters length limit and was truncated".
Table tr = tableEnv.sqlQuery("select " +
" coalesce(a.id, b.id) id," +
" coalesce(a.item, b.item) item," +
" a.amount as revenue," +
" b.amount as profit" +
" from " +
" (select * from tableA" +
" where type='revenue') a" +
" full outer join " +
" (select * from tableA" +
" where type='profit') b" +
" on a.id=b.id, a.item=b.item");
I am not sure how to resolve this. Is there a limit on the number of characters in a join?
I suspect you are seeing this warning:
The operator name {} exceeded the {} characters length limit and was truncated.
You can safely ignore this. It just means that the label you see in the web UI won't show the complete SQL join.

I tried to automate JSON views in Snowflake, but it displays an error in the regex

I tried this approach and it is displaying an error that the regex has a missing statement. The approach that I am trying is from the post Automating Snowflake’s Semi-Structured JSON Data Handling.
CREATE OR REPLACE PROCEDURE create_view_over_json (TABLE_NAME varchar, COL_NAME varchar, VIEW_NAME varchar, COLUMN_CASE varchar, COLUMN_TYPE varchar)
RETURNS VARCHAR
LANGUAGE javascript
AS
$$
var alias_dbl_quote = "";
var path_name = "regexp_replace(regexp_replace(f.path,'\\[(.+)\\]'),'(\\w+)','"\\1"')" // This generates paths with levels enclosed by double quotes (ex: "path"."to"."element"). It also strips any bracket-enclosed array element references (like "[0]")
var attribute_type = "DECODE (substr(typeof(f.value),1,1),'A','ARRAY','B','BOOLEAN','I','FLOAT','D','FLOAT','STRING')"; // This generates column datatypes of ARRAY, BOOLEAN, FLOAT, and STRING only
var alias_name = "REGEXP_REPLACE(REGEXP_REPLACE(f.path, '\\[(.+)\\]'),'[^a-zA-Z0-9]','_')" ; // This generates column aliases based on the path
var table_list = TABLE_NAME;
var col_list = "";
var array_num = 0;
if (COLUMN_CASE.toUpperCase().charAt(0) == 'M') {
alias_dbl_quote = """; } // COLUMN_CASE parameter is set to 'match col case' so add double quotes around view column alias name
if (COLUMN_TYPE.toUpperCase().charAt(0) == 'S') {
attribute_type = "DECODE (typeof(f.value),'ARRAY','ARRAY','STRING')"; } // COLUMN_TYPE parameter is set to 'string datatypes' so typecast to STRING instead of value returned by TYPEPOF function
// Build a query that returns a list of elements which will be used to build the column list for the CREATE VIEW statement
var element_query = "SELECT DISTINCT n" +
path_name + " AS path_name, n" +
attribute_type + " AS attribute_type, n" +
alias_name + " AS alias_name n" +
"FROM n" +
TABLE_NAME + ", n" +
"LATERAL FLATTEN(" + COL_NAME + ", RECURSIVE=>true) f n" +
"WHERE TYPEOF(f.value) != 'OBJECT' n" +
"AND NOT contains(f.path,'[') "; // This prevents traversal down into arrays
// Run the query...
var element_stmt = snowflake.createStatement({sqlText:element_query});
var element_res = element_stmt.execute();
// ...And loop through the list that was returned
while (element_res.next()) {
if (element_res.getColumnValue(2) != 'ARRAY') {
if (col_list != "") {
col_list += ", n";}
col_list += COL_NAME + ":" + element_res.getColumnValue(1); // Start with the element path name
col_list += "::" + element_res.getColumnValue(2); // Add the datatype
col_list += " as " + alias_dbl_quote + element_res.getColumnValue(3) + alias_dbl_quote; // And finally the element alias
}
// Array elements get handled in the following section:
else {
array_num++;
var simple_array_col_list = "";
var object_array_col_list = "";
// Build a query that returns the elements in the current array
var array_query = "SELECT DISTINCT n"+
path_name + " AS path_name, n" +
attribute_type + " AS attribute_type, n" +
alias_name + " AS attribute_name, n" +
"f.index n" +
"FROM n" +
TABLE_NAME + ", n" +
"LATERAL FLATTEN(" + COL_NAME + ":" + element_res.getColumnValue(1) + ", RECURSIVE=>true) f n" +
"WHERE REGEXP_REPLACE(f.path, '.+(\\w+\\[.+\\]).+', 'SubArrayEle') != 'SubArrayEle' "; // This prevents return of elements of nested arrays (the entire array will be returned in this case)
while (array_res.next()) {
if (array_res.getColumnValue(1).substring(1) == "") { // The element path name is empty, so this is a simple array element
if (simple_array_col_list != "") {
simple_array_col_list += ", n";}
simple_array_col_list += COL_NAME + ":" + element_res.getColumnValue(1); // Start with the element path name
simple_array_col_list += "[" + array_res.getColumnValue(4) + "]"; // Add the array index
simple_array_col_list += "::" + array_res.getColumnValue(2); // Add the datatype
simple_array_col_list += " as " + alias_dbl_quote + element_res.getColumnValue(3) + "_" + array_res.getColumnValue(4) + alias_dbl_quote; // And finally the element alias - Note that the array alias is added as a prefix to ensure uniqueness
}
else { // This is an object array element
if (object_array_col_list != "") {
object_array_col_list += ", n";}
object_array_col_list += "a" + array_num + ".value:" + array_res.getColumnValue(1).substring(1); // Start with the element name (minus the leading '.' character)
object_array_col_list += "::" + array_res.getColumnValue(2); // Add the datatype
object_array_col_list += " as " + alias_dbl_quote + element_res.getColumnValue(3) + array_res.getColumnValue(3) + alias_dbl_quote; // And finally the element alias - Note that the array alias is added as a prefix to ensure uniqueness
}
}
// If no object array elements were found then add the simple array elements to the
// column list...
if (object_array_col_list == "") {
if (col_list != "") {
col_list += ", n";}
col_list += simple_array_col_list;
}
// ...otherwise, add the object array elements to the column list along with a
// LATERAL FLATTEN clause that references the current array to the table list
else {
if (col_list != "") {
col_list += ", n";}
col_list += object_array_col_list;
table_list += ",n LATERAL FLATTEN(" + COL_NAME + ":" + element_res.getColumnValue(1) + ") a" + array_num;
}
}
}
// Now build the CREATE VIEW statement
var view_ddl = "CREATE OR REPLACE VIEW " + VIEW_NAME + " AS n" +
"SELECT n" + col_list + "n" +
"FROM " + table_list;
// Now run the CREATE VIEW statement
var view_stmt = snowflake.createStatement({sqlText:view_ddl});
var view_res = view_stmt.execute();
return view_res.next();
$$;
I followed the approach from the [linked post][1], but the regex in it triggers an error.
[1]: https://www.snowflake.com/blog/automating-snowflakes-semi-structured-json-data-handling-part-2/#
I am getting this error:
JavaScript compilation error: Uncaught SyntaxError: Invalid or unexpected token
in CREATE_VIEW_OVER_JSON at ' var path_name = "regexp_replace(regexp_replace(f.path,'\[(.+)\]'),'(\\w+)','"\\1"')" ;'
position 82
Your error can be reproduced with just the first two lines of the SP, because your quotes are incorrectly escaped/paired for the task:
CREATE OR REPLACE PROCEDURE create_view_over_json (TABLE_NAME varchar, COL_NAME varchar, VIEW_NAME varchar, COLUMN_CASE varchar, COLUMN_TYPE varchar)
RETURNS VARCHAR
LANGUAGE javascript
AS
$$
var alias_dbl_quote = "";
var path_name = "regexp_replace(regexp_replace(f.path,'\\[(.+)\\]'),'(\\w+)','"\\1"')" // This generates paths with levels enclosed by double quotes
$$;
call create_view_over_json(null,null,null,null,null);
gives:
100131 (P0000): JavaScript compilation error: Uncaught SyntaxError: Invalid or unexpected token in CREATE_VIEW_OVER_JSON at 'var path_name = "regexp_replace(regexp_replace(f.path,'\[(.+)\]'),'(\w+)','"\1"')" // This generates paths with levels enclosed by double quotes ' position 79
In SQL and in JavaScript you cannot put the quote character you are using to wrap a string inside that string without escaping it. In SQL you can use $$ or ', and here $$ is used for the stored procedure body. Inside the JavaScript you can use ', ", or `.
Thus on this line you have used double quotes, which means you can embed single quotes, but the double quotes inside the string terminate it:
"regexp_replace(regexp_replace(f.path,'\\[(.+)\\]'),'(\\w+)','"\\1"')"
                                                              ^   ^
Thus, given you need both single and double quotes inside the string, you should wrap it in backticks (`):
var path_name = `regexp_replace(regexp_replace(f.path,'\\[(.+)\\]'),'(\\w+)','"\\1"')`;
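For what it's worth, the same quoting problem appears further down where alias_dbl_quote is assigned a literal double quote; wrapped in backticks that assignment needs no escaping either, for example:
alias_dbl_quote = `"`; // a single double-quote character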

Nested match_recognize query not supported in flink SQL?

I am using Flink 1.11 and trying a nested query with MATCH_RECOGNIZE inside, as shown below:
select * from events where id = (
  SELECT * FROM events
  MATCH_RECOGNIZE (
    PARTITION BY org_id
    ORDER BY proctime
    MEASURES A.id AS startId
    ONE ROW PER MATCH
    PATTERN (A C* B)
    DEFINE
      A AS A.tag = 'tag1',
      C AS C.tag <> 'tag2',
      B AS B.tag = 'tag2'
  )
);
And I am getting this error: org.apache.calcite.sql.validate.SqlValidatorException: Table 'A' not found
Is this not supported? If not, what's the alternative?
I was able to get something working by doing this:
Table events = tableEnv.fromDataStream(input,
$("sensorId"),
$("ts").rowtime(),
$("kwh"));
tableEnv.createTemporaryView("events", events);
Table matches = tableEnv.sqlQuery(
"SELECT id " +
"FROM events " +
"MATCH_RECOGNIZE ( " +
"PARTITION BY sensorId " +
"ORDER BY ts " +
"MEASURES " +
"this_step.sensorId AS id " +
"AFTER MATCH SKIP TO NEXT ROW " +
"PATTERN (this_step next_step) " +
"DEFINE " +
"this_step AS TRUE, " +
"next_step AS TRUE " +
")"
);
tableEnv.createTemporaryView("mmm", matches);
Table results = tableEnv.sqlQuery(
"SELECT * FROM events WHERE events.sensorId IN (select * from mmm)");
tableEnv
.toAppendStream(results, Row.class)
.print();
For some reason, I couldn't get it to work without defining a view. I kept getting Calcite errors.
I guess you are trying to avoid enumerating all of the columns from A in the MEASURES clause of the MATCH_RECOGNIZE. You may want to compare the resulting execution plans to see if there's any significant difference.
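If you do compare them, a minimal sketch (assuming the Table#explain API introduced in Flink 1.11) would be:
// Print the optimized execution plan of the view-based query for comparison
System.out.println(results.explain());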

T-SQL Concatenating

I need to concatenate the following statement in SQL.
I am joining 2 tables, the MEDINFO table and the IMMUNIZE table.
There should be one record for each of the shot code dates, as long as the date is neither null nor 01/01/1901.
The output must look like this:
"Admin-" + medinfo_field_31 + ", Manuf-" + medinfo_field_32 + ", Lot-" + medinfo_field_33 + ", Exp-" + medinfo_field_34 + ", Site-" + medifno_field_35 + ", Dose-" + medinfo_field_36
Here is the criteria for the SQL statement I must create:
IF immunize_shot_code = 'FLU',
then concatenate medinfo_field_12 and medinfo_field_19 thru medinfo_field_24
IF immunize_shot_code = 'MENI',
then concatenate medinfo_field_11 and medinfo_field_25 thru medinfo_field_30
IF immunize_shot_code = 'TETA',
then concatenate medinfo_field_13 thru medinfo_field_18 and medinfo_field_31 thru medinfo_field_36
Thanks for any ideas.
This would look something like the following:
SELECT
(CASE WHEN Immunize_shot_code='Flu'
THEN medinfo_field_12+', '+medinfo_field_19+', '+medinfo_field_20+', '+medinfo_field_21+', '+medinfo_field_22+', '+medinfo_field_23+', '+medinfo_field_24
WHEN immunize_shot_code='MENI'
THEN [CONCATENATE AS PER ABOVE]
.....
.....
END) as ImmuCode
FROM TABLE_1
Like the other answer mentioned, you could use a CASE expression, something like this:
select
case when i.immunize_shot_Code = 'FLU'
then concat(m.medinfo_field_12,',',m.medinfo_field_19,',',m.medinfo_field_24...)
when i.immunize_Shot_Code = 'MENI'
then concat(m.medinfo_field_11,',',m.medinfo_field_25,...)
when i.immunize_Shot_code = 'TETA'
then concat(m.medinfo_field_13,',',m.medinfo_field_18,...)
end as concat_value
from medinfo m
join immunize i on m.id = i.medinfo_id --change this to match your join
where (shot_code_date is not null and shot_code_date <> '01-01-1901');
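To tie either answer back to the exact output format shown in the question, a sketch of the 'TETA' branch might look like this (the join key and the single shot_code_date filter are assumptions; CONCAT is used so that a NULL field does not null out the whole string):
select
case when i.immunize_shot_code = 'TETA'
then concat('Admin-', m.medinfo_field_31, ', Manuf-', m.medinfo_field_32, ', Lot-', m.medinfo_field_33, ', Exp-', m.medinfo_field_34, ', Site-', m.medinfo_field_35, ', Dose-', m.medinfo_field_36)
-- 'FLU' and 'MENI' branches follow the same pattern with their own fields
end as concat_value
from medinfo m
join immunize i on i.medinfo_id = m.id -- assumed join key
where i.shot_code_date is not null
and i.shot_code_date <> '19010101'; -- exclude the 01/01/1901 placeholder date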

What's the most efficient way to search text of any data type in EF6?

I am trying to dynamically create a LINQ query for searching data in all columns.
Currently, I use the following code to build the dynamic LINQ query. However, it's quite buggy when it deals with complex columns.
var type = property[col.ToLower()].PropertyType;
var isNullableType = type.IsGenericType && type.GetGenericTypeDefinition() == typeof (Nullable<>);
if (type.IsValueType && !isNullableType)
{
filter += col + ".ToString().ToLower().Contains(#" + i + ".ToLower())";
}
else if (isNullableType)
{
filter += col + ".HasValue ? " + col + ".Value.ToString().ToLower().Contains(#" + i + ".ToLower())" + " : false";
}
else
{
filter += "(" + col + " != null ? " + col + " : \"\").ToString().ToLower().Contains(#" + i + ".ToLower())";
}
Do you have any ideas for simplifying the code above? I'm OK if your suggestion works only for SQL Server 2008 or later.
Note:
Column data can be any type like integer, string, object, enum and it can be null.
