SQL command not properly ended error (Oracle database)

I have an INSERT that fires a trigger; the trigger checks one condition and only then lets the row into the payment table. The compiler also complains about the initialization lines for total_rasr and id_buyer with "invalid number", even though the types are the same in the table and in the trigger. And the biggest question: this trigger used to work, and at some point it stopped and started throwing these errors.
INSERT INTO PAYMENT (ID, ADDRESS, DATE_PAYMENT, PAYMENT_METHOD_ID, EMPLOYEE_ID, BUYER_ID)
VALUES (Key('PAYMENT'), 'Moscow', TO_DATE('2002-08-23', 'YYYY-MM-DD'), '005!00002', '1', '002!00005');

The trigger:
create or replace TRIGGER checking_the_availability_work
BEFORE INSERT ON PAYMENT
FOR EACH ROW
DECLARE
    rasr_is_possible     boolean := true;
    total_rasr           VARCHAR(10);
    id_buyer             VARCHAR(10);
    AVAILABILITY_OF_WORK VARCHAR(5);
    xSQL                 VARCHAR(100);
BEGIN
    total_rasr := :NEW.PAYMENT_METHOD_ID;
    id_buyer   := :NEW.BUYER_ID;
    IF total_rasr = '005!00002' THEN
        xSQL := 'select AVAILABILITY_OF_WORK FROM BUYER WHERE ID =' || id_buyer;
        execute immediate xSQL into AVAILABILITY_OF_WORK;
        IF AVAILABILITY_OF_WORK = 'Нет' THEN  -- 'Нет' = 'No'
            rasr_is_possible := false;
        END IF;
    END IF;
    IF NOT rasr_is_possible THEN
        Raise_application_error(-20201, 'У вас нет места работы!');  -- 'You have no place of work!'
    END IF;
END;

Why use dynamic SQL here at all? It is really convoluted, and it is very likely what produces the errors: id_buyer is concatenated into the statement without quotes, so a value like 002!00005 ends up in the query as a bare token instead of a string literal, which is exactly the kind of thing that raises "invalid number" or "SQL command not properly ended". Try something like this instead, and perhaps also add a check that :new.buyer_id has a value as expected.
DECLARE
    L_AVAILABILITY_OF_WORK VARCHAR(5);
BEGIN
    IF :NEW.PAYMENT_METHOD_ID = '005!00002' THEN
        SELECT AVAILABILITY_OF_WORK
          INTO L_AVAILABILITY_OF_WORK
          FROM BUYER
         WHERE ID = :NEW.BUYER_ID;
        IF L_AVAILABILITY_OF_WORK = 'Нет' THEN
            Raise_application_error(-20201, 'У вас нет места работы!');
        END IF;
    END IF;
END;
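If the dynamic SQL were kept for some reason, a bind variable would avoid the quoting problem (and the injection risk) that the concatenation introduces. A minimal sketch of that variant, using the same variables as the original trigger:

-- Sketch: bind the buyer id instead of concatenating it into the statement.
xSQL := 'select AVAILABILITY_OF_WORK from BUYER where ID = :b_id';
EXECUTE IMMEDIATE xSQL INTO AVAILABILITY_OF_WORK USING id_buyer;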

Related

Insert or Update over a DBLINK in a Loop

I have a row which I want to insert (or update if the ID is already used) from another schema over a DBLINK. The table has many FKs, so I need to update or insert (if it doesn't exist) into around 20 tables.
I tried it by reading all the needed rows from the different tables over the DBLINK into variables and then checking whether the target tables already had those rows. I'm sure there is a much easier way to do it, for example a loop where the tables and their rowtypes are kept in a collection and processed with EXECUTE IMMEDIATE, but I couldn't find anything.
This pattern appears about 20 times in my code and I'm sure there is an easier way than writing it over and over.
PROCEDURE Test_copy (pi_id IN NUMBER) IS
    V_REC_DBLINK_Table1   Table1%ROWTYPE;
    V_REC_DBLINK_Table2   Table2%ROWTYPE;
    V_REC_DBLINK_Table3   Table3%ROWTYPE;
    V_REC_DBLINK_Table4   Table4%ROWTYPE;
    V_REC_DBLINK_Table5   Table5%ROWTYPE;
    V_REC_DBLINK_Table6   Table6%ROWTYPE;
    V_REC_DBLINK_Table7   Table7%ROWTYPE;
    V_REC_DBLINK_Table8   Table8%ROWTYPE;
    V_REC_DBLINK_Table9   Table9%ROWTYPE;
    V_REC_DBLINK_Table10  Table10%ROWTYPE;
    V_REC_DBLINK_Table1_exists   Table1%ROWTYPE;
    V_REC_DBLINK_Table2_exists   Table2%ROWTYPE;
    V_REC_DBLINK_Table3_exists   Table3%ROWTYPE;
    V_REC_DBLINK_Table4_exists   Table4%ROWTYPE;
    V_REC_DBLINK_Table5_exists   Table5%ROWTYPE;
    V_REC_DBLINK_Table6_exists   Table6%ROWTYPE;
    V_REC_DBLINK_Table7_exists   Table7%ROWTYPE;
    V_REC_DBLINK_Table8_exists   Table8%ROWTYPE;
    V_REC_DBLINK_Table9_exists   Table9%ROWTYPE;
    V_REC_DBLINK_Table10_exists  Table10%ROWTYPE;
    v_sql VARCHAR2(2000);
BEGIN
    -- Read the source rows over the database link
    BEGIN
        v_sql := 'SELECT * FROM Table1@dblink WHERE ID = ' || pi_id;
        EXECUTE IMMEDIATE v_sql INTO V_REC_DBLINK_Table1;
    EXCEPTION
        WHEN no_data_found THEN
            Raise_application_Error(-20001, 'ID doesnt exist');
    END;
    BEGIN
        v_sql := 'SELECT * FROM Table2@dblink WHERE ID = ' || V_REC_DBLINK_Table1.T2_ID;
        EXECUTE IMMEDIATE v_sql INTO V_REC_DBLINK_Table2;
    EXCEPTION
        WHEN no_data_found THEN
            Raise_application_Error(-20001, 'ID doesnt exist');
    END;
(and so on)
.
.
.
    -- Check whether the row exists locally, then update or insert
    BEGIN
        v_sql := 'SELECT * FROM Table1 WHERE ID = ' || V_REC_DBLINK_Table1.ID;
        EXECUTE IMMEDIATE v_sql INTO V_REC_DBLINK_Table1_exists;
        UPDATE TABLE1
           SET ID          = V_REC_DBLINK_Table1.ID,
               NAME        = V_REC_DBLINK_Table1.NAME,
               DESCRIPTION = V_REC_DBLINK_Table1.DESCRIPTION,
               NOTE        = V_REC_DBLINK_Table1.NOTE,
               NUMBER      = V_REC_DBLINK_Table1.NUMBER,
               ADDRESS     = V_REC_DBLINK_Table1.ADDRESS
         WHERE ID = V_REC_DBLINK_Table1.ID;
    EXCEPTION
        WHEN no_data_found THEN
            INSERT INTO TABLE1
                (ID, NAME, DESCRIPTION, NOTE, NUMBER, ADDRESS)
            VALUES
                (V_REC_DBLINK_Table1.ID,
                 V_REC_DBLINK_Table1.NAME,
                 V_REC_DBLINK_Table1.DESCRIPTION,
                 V_REC_DBLINK_Table1.NOTE,
                 V_REC_DBLINK_Table1.NUMBER,
                 V_REC_DBLINK_Table1.ADDRESS);
    END;
    BEGIN
        v_sql := 'SELECT * FROM Table2 WHERE ID = ' || V_REC_DBLINK_Table2.ID;
        EXECUTE IMMEDIATE v_sql INTO V_REC_DBLINK_Table2_exists;
        UPDATE TABLE2
           SET ID          = V_REC_DBLINK_Table2.ID,
               NAME        = V_REC_DBLINK_Table2.NAME,
               DESCRIPTION = V_REC_DBLINK_Table2.DESCRIPTION
         WHERE ID = V_REC_DBLINK_Table2.ID;
    EXCEPTION
        WHEN no_data_found THEN
            INSERT INTO TABLE2
                (ID, NAME, DESCRIPTION)
            VALUES
                (V_REC_DBLINK_Table2.ID,
                 V_REC_DBLINK_Table2.NAME,
                 V_REC_DBLINK_Table2.DESCRIPTION);
    END;
(and so on for all tables via DBLINK)
Any suggestions? I just want to do this in a shorter way (maybe via a collection and a loop).
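One possible way to shorten each of those blocks, assuming every target table is keyed by an ID column as in the example, is to let a single MERGE per table do the existence check, the update and the insert in one statement; the dynamic SELECTs and the *_exists records then become unnecessary. This is only a sketch for Table1, showing just some of the columns from the example:

-- Sketch: one MERGE per table replaces the SELECT-then-UPDATE-or-INSERT block.
MERGE INTO Table1 t
USING (SELECT * FROM Table1@dblink WHERE ID = pi_id) s
   ON (t.ID = s.ID)
 WHEN MATCHED THEN UPDATE SET
      t.NAME        = s.NAME,
      t.DESCRIPTION = s.DESCRIPTION,
      t.NOTE        = s.NOTE,
      t.ADDRESS     = s.ADDRESS
 WHEN NOT MATCHED THEN INSERT (ID, NAME, DESCRIPTION, NOTE, ADDRESS)
      VALUES (s.ID, s.NAME, s.DESCRIPTION, s.NOTE, s.ADDRESS);

If all the tables really follow the same ID pattern, the MERGE text can also be generated per table name and run with EXECUTE IMMEDIATE in a loop over a collection of table names, which is roughly the collection-and-loop idea mentioned above.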

Atomic Transaction with nested BEFORE INSERT/UPDATE Triggers

I am currently implementing a procedure that creates a couple of rows in some related tables from a template. The procedure consists of a SAVEPOINT followed by some INSERT statements on different tables, and a cursor that inserts some more rows into other tables while referencing the newly created primary keys.
Each of those tables has a BEFORE INSERT/UPDATE trigger defined whose purpose is to:
Get a new primary key from a sequence if it is not provided in the INSERT statement (there are cases where I need to set the primary key explicitly in order to reference it later in the same transaction)
Set some default values if they are NULL
Set auditing fields (last_change_date, last_change_user, etc.)
The transaction fails with ORA-04091: table is mutating, trigger/function may not see it.
I understand that I could work around this by declaring PRAGMA AUTONOMOUS_TRANSACTION in each trigger, but then my transaction would no longer be atomic, and the requirement is that all those rows are created/inserted as a whole or not at all.
So what am I doing wrong in the design of my database?
UPDATE: This is the code of the trigger:
CREATE TRIGGER TRG_AUFTRAG_B_IU
BEFORE INSERT OR UPDATE ON AUFTRAG
FOR EACH ROW
BEGIN
    IF INSERTING THEN
        IF :new.id IS NULL OR :new.id = 0 THEN
            SELECT SEQ_AUFTRAG.nextval INTO :new.id FROM dual;
        END IF;
        IF :new.nummer IS NULL OR :new.nummer = 0 THEN
            SELECT nvl(MAX(NUMMER), 0) + 1
              INTO :new.nummer
              FROM AUFTRAG
             WHERE EXTRACT(YEAR FROM DATUM) = EXTRACT(YEAR FROM :new.DATUM);
        END IF;
        -- DEFAULT values
        IF :new.BETR_GRENZWERTE_RELEVANT IS NULL THEN
            SELECT 0 INTO :new.BETR_GRENZWERTE_RELEVANT FROM dual;
        END IF;
        IF :new.DOKUMENTE_ABGELEGT IS NULL THEN
            SELECT 0 INTO :new.DOKUMENTE_ABGELEGT FROM dual;
        END IF;
        IF :new.EXT_ORG IS NULL OR :new.EXT_ORG < 1 THEN
            SELECT 1 INTO :new.EXT_ORG FROM dual;
        END IF;
        :new.ERSTELLT_VON   := nvl(:new.ERSTELLT_VON, user);
        :new.ERSTELLT_DATUM := nvl(:new.ERSTELLT_DATUM, sysdate);
    END IF;
    :new.GEAENDERT_VON   := user;
    :new.GEAENDERT_DATUM := sysdate;
END;
You can write it more compactly like this:
CREATE TRIGGER TRG_AUFTRAG_B_IU
BEFORE INSERT OR UPDATE ON AUFTRAG
FOR EACH ROW
BEGIN
    IF INSERTING THEN
        :new.id := NVL(NULLIF(:new.id, 0), SEQ_AUFTRAG.nextval);
        -- DEFAULT values
        :new.BETR_GRENZWERTE_RELEVANT := NVL(:new.BETR_GRENZWERTE_RELEVANT, 0);
        :new.DOKUMENTE_ABGELEGT       := NVL(:new.DOKUMENTE_ABGELEGT, 0);
        IF :new.EXT_ORG IS NULL OR :new.EXT_ORG < 1 THEN
            :new.EXT_ORG := 1;
        END IF;
        :new.ERSTELLT_VON   := NVL(:new.ERSTELLT_VON, user);
        :new.ERSTELLT_DATUM := NVL(:new.ERSTELLT_DATUM, sysdate);
    END IF;
    :new.GEAENDERT_VON   := user;
    :new.GEAENDERT_DATUM := sysdate;
END;
Only "problem" is this part
IF :new.nummer is NULL or :new.nummer = 0 THEN
SELECT nvl(MAX(NUMMER),0)+1 INTO :new.nummer
FROM AUFTRAG
WHERE EXTRACT(YEAR from DATUM) = EXTRACT(YEAR from :new.DATUM);
END IF;
This part you should put into your procedure or into a statement-level trigger (i.e. without the FOR EACH ROW clause), since a statement-level trigger is not subject to the mutating-table restriction and may query AUFTRAG. For example:
CREATE TRIGGER TRG_AUFTRAG_B_A
AFTER INSERT ON AUFTRAG
BEGIN
    UPDATE (SELECT ID, NUMMER,
                   ROW_NUMBER() OVER (PARTITION BY EXTRACT(YEAR FROM DATUM) ORDER BY ID) AS N
              FROM AUFTRAG)
       SET NUMMER = N
     WHERE NUMMER IS NULL;
END;
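The same idea can also be packaged as a compound trigger if you are on Oracle 11g or later. This is only a sketch, under the assumption taken from the original code that NUMMER restarts per calendar year of DATUM; the trigger name is illustrative:

-- Sketch: collect the new IDs per row, then number them once the statement is done,
-- when AUFTRAG is no longer mutating.
CREATE OR REPLACE TRIGGER TRG_AUFTRAG_NUMMER
FOR INSERT ON AUFTRAG
COMPOUND TRIGGER
    TYPE t_ids IS TABLE OF AUFTRAG.ID%TYPE;
    g_ids t_ids := t_ids();

    AFTER EACH ROW IS
    BEGIN
        IF :new.NUMMER IS NULL OR :new.NUMMER = 0 THEN
            g_ids.EXTEND;
            g_ids(g_ids.COUNT) := :new.ID;
        END IF;
    END AFTER EACH ROW;

    AFTER STATEMENT IS
    BEGIN
        FOR i IN 1 .. g_ids.COUNT LOOP
            UPDATE AUFTRAG a
               SET a.NUMMER = (SELECT NVL(MAX(b.NUMMER), 0) + 1
                                 FROM AUFTRAG b
                                WHERE EXTRACT(YEAR FROM b.DATUM) = EXTRACT(YEAR FROM a.DATUM))
             WHERE a.ID = g_ids(i);
        END LOOP;
    END AFTER STATEMENT;
END TRG_AUFTRAG_NUMMER;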

Correct locking for stored procedure

Apologies for the basic nature of this question, but it comes from a SQL noob.
I've created the following stored procedure after some online research. The aim of the procedure is to maintain a count (VisitCount), so appropriate locking is necessary to maintain integrity. As far as I understand, MERGE takes the correct level of lock for this scenario, but I'd appreciate it if someone could advise whether that is correct or not.
Thanks.
ALTER PROCEDURE dbo.Popularity_Update
    @TermID int
AS
    SET NOCOUNT ON
    DECLARE @Now date = SYSDATETIME()

    BEGIN TRY
        MERGE Popularity AS t
        USING (SELECT @TermID AS TermID, @Now AS VisitDate) AS s
           ON t.TermID = s.TermID
          AND t.VisitDate = s.VisitDate
        WHEN MATCHED THEN
            UPDATE SET VisitCount += 1
        WHEN NOT MATCHED BY TARGET THEN
            INSERT (TermID, VisitDate, VisitCount)
            VALUES (s.TermID, s.VisitDate, 1);
    END TRY
    BEGIN CATCH
    END CATCH
How about this....
ALTER PROCEDURE dbo.Popularity_Update
    @TermID int
AS
BEGIN
    SET NOCOUNT ON;
    DECLARE @Now date = SYSDATETIME()

    BEGIN TRY
        UPDATE Popularity
           SET VisitCount = COALESCE(VisitCount, 0) + 1
         WHERE TermID = @TermID
           AND VisitDate = @Now

        IF (@@ROWCOUNT = 0)
        BEGIN
            INSERT INTO Popularity (TermID, VisitDate, VisitCount)
            VALUES (@TermID, @Now, 1)
        END
    END TRY
    BEGIN CATCH
    END CATCH
END
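One note on the locking question itself: neither the MERGE nor the UPDATE-then-INSERT version is automatically race-free under concurrent callers. A commonly recommended variant for an upsert on SQL Server is to add a HOLDLOCK (serializable) hint to the MERGE target, so the key range stays locked between the existence check and the insert. The sketch below is the question's MERGE with only that hint added:

-- Sketch: WITH (HOLDLOCK) keeps the key range locked for the whole MERGE, so two
-- concurrent calls for the same (TermID, VisitDate) cannot both take the
-- "not matched" branch.
MERGE Popularity WITH (HOLDLOCK) AS t
USING (SELECT @TermID AS TermID, @Now AS VisitDate) AS s
   ON t.TermID = s.TermID
  AND t.VisitDate = s.VisitDate
WHEN MATCHED THEN
    UPDATE SET VisitCount += 1
WHEN NOT MATCHED BY TARGET THEN
    INSERT (TermID, VisitDate, VisitCount)
    VALUES (s.TermID, s.VisitDate, 1);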

Cursor Not Returned Query if Execute SQL Statement with Return Value

I have an issue when executing the query below with DBExpress and Delphi XE. I need to get the last identity ID from the executed query:
function TServerDBUtils.ExecuteQueryWithIdentity(ASQLConn: TSQLConnection): Integer;
var
  newSQLQuery: TSQLQuery;
begin
  Result := -1;
  newSQLQuery := TSQLQuery.Create(nil);
  try
    with newSQLQuery do
    begin
      SQLConnection := ASQLConn;
      SQL.Clear;
      SQL.Add('Insert into SampleTable(uomname) values(' + QuotedStr('bag') + ')');
      SQL.Add('Select Scope_Identity()');
      Open;
      Result := Fields[0].AsInteger;
    end;
  finally
    FreeAndNil(newSQLQuery);
  end;
end;
I get error "cursor not returned query". I have used same method before, using FireDac & Delphi XE5 and got no error. No, I'm wondering if "open" is not allowed to do such thing in DBExpress. What method I should use? (We have to use DBExpress in our project)
I've tried this :
function TServerDBUtils.ExecuteQueryWithIdentity(ASQLConn: TSQLConnection): Integer;
var
  newSQLQuery: TSQLQuery;
begin
  Result := -1;
  newSQLQuery := TSQLQuery.Create(nil);
  try
    with newSQLQuery do
    begin
      SQLConnection := ASQLConn;
      SQL.Clear;
      SQL.Add('Insert into SampleTable(uomname) values(' + QuotedStr('bag') + ')');
      ExecSQL;
      SQL.Clear;
      SQL.Add('Select Scope_Identity()');
      Open;
      Result := Fields[0].AsInteger;
    end;
  finally
    FreeAndNil(newSQLQuery);
  end;
end;
And I always get NULL values, maybe because the SELECT runs in a different scope (batch) than the INSERT.
Sorry for my bad English, and thanks in advance for any help.
Update:
It works if we use @@identity:
SQL.Add('Insert into SampleTable(uomname) values(' + QuotedStr('bag') + ')');
ExecSQL;
SQL.Clear;
SQL.Add('Select @@Identity');
Open;
Result := Fields[0].AsInteger;
But there's a problem, as the SQL Server documentation notes: if a trigger on that table fires on the insert, @@identity returns the last identity value from the table the trigger inserted into, not from my own insert.
The most elegant way on SQL Server might be to use the OUTPUT clause, which can return not just one ID but all newly generated ones in the case of a multi-row insert.
INSERT into aTable (aField)
OUTPUT Inserted.ID
Values ('SomeValue')
If you have a trigger on your table, you will have to define a destination table for your OUTPUT:
DECLARE @tmp table (ID int)
INSERT into aTable (aField)
OUTPUT Inserted.ID into @tmp
Values ('SomeValue')
Select * from @tmp
Another piece of advice would be to use parameters instead of hard-coded values.
With TSQLQuery, adding SET NOCOUNT ON before the statement will prevent the "cursor not returned" error and deliver the expected result:
begin
  SQLQuery1.SQL.Text := 'SET NOCOUNT ON'
    + #13#10'DECLARE @tmp table (ID int)'
    + #13#10'INSERT into aTable (aField)'
    + #13#10'OUTPUT Inserted.ID into @tmp'
    + #13#10'Values (:P)'
    + #13#10'Select * from @tmp';
  SQLQuery1.Params.ParamByName('P').Value := 'SomeText';
  SQLQuery1.Open;
  ShowMessage(SQLQuery1.Fields[0].AsString);
end;
I used to do this with XE2 and SQL Server:
with newSQLQuery do
  try
    SQLConnection := ASQLConn;
    SQL.Clear;
    SQL.Add('Insert into SampleTable(uomname) values(' + QuotedStr('bag') + ')');
    ExecSQL;
    SQL.Clear;
    SQL.Add('Select Scope_Identity() as id');
    Open;
    if not Eof then
      Result := FieldByName('id').AsInteger;
  finally
    FreeAndNil(newSQLQuery);
  end;
So I introduce the as keyword in the second SQL statement. When I open the query, I just check for the field named 'id' that was requested.

DB2 Stored Procedure - controlled large record deletion

I've written a DB2 stored procedure for a housekeeping job that, in theory, could be deleting large volumes of data from the database.
The requirement is to control the deletion by committing a defined number of records at a time.
Firstly, I would like some feedback on my stored procedure to see if there are any improvements I could make.
Secondly, I've got a question about SQL errors. If an error occurs during an iteration of the loop, does the stored procedure exit immediately? Ideally I would like to continue the loop, trying to delete as many records as I can. I'm not sure whether my script works that way or not.
CREATE PROCEDURE leave_loop(IN commit_unit INTEGER, OUT counter INTEGER)
LANGUAGE SQL
BEGIN
    DECLARE v_prod_id        INTEGER;
    DECLARE v_delete_counter INTEGER DEFAULT 0;
    DECLARE v_total          INTEGER DEFAULT 0;
    DECLARE not_found        CHAR(1) DEFAULT 'N';
    DECLARE c1 CURSOR WITH HOLD FOR
        SELECT prod_id
        FROM product
        WHERE status_deleted = 1
        ORDER BY prod_id;
    DECLARE CONTINUE HANDLER FOR NOT FOUND
        SET not_found = 'Y';

    SET counter = 0;
    OPEN c1;
    delete_loop:
    LOOP
        -- Fetch the record
        FETCH c1 INTO v_prod_id;
        -- If no row was found then leave the loop
        IF not_found = 'Y' THEN
            -- If we have not reached the commit unit then commit the outstanding records
            IF v_delete_counter > 0 THEN
                COMMIT;
            END IF;
            LEAVE delete_loop;
        END IF;
        -- Perform the deletion
        DELETE FROM product WHERE prod_id = v_prod_id;
        SET v_delete_counter = v_delete_counter + 1;
        -- Check if the commit unit has been reached
        IF MOD(v_delete_counter, commit_unit) = 0 THEN
            COMMIT;
            SET v_delete_counter = 0;
            SET v_total = v_total + 1;
        END IF;
    END LOOP delete_loop;
    CLOSE c1;
    SET counter = v_total;
END #
I had similar requirements in the past. Please find below the stored procedure which I wrote to serve my needs.
SET SERVEROUTPUT ON#

DROP PROCEDURE DELETE_WITH_COMMIT_COUNT#

CREATE PROCEDURE DELETE_WITH_COMMIT_COUNT(IN v_TABLE_NAME VARCHAR(24),
                                          IN v_COMMIT_COUNT INTEGER,
                                          IN v_WHERE_CONDITION VARCHAR(1024))
NOT DETERMINISTIC
LANGUAGE SQL
BEGIN
    -- DECLARE statements
    DECLARE SQLCODE            INTEGER;
    DECLARE v_COUNTER          INTEGER DEFAULT 0;
    DECLARE v_DELETE_QUERY     VARCHAR(1024);
    DECLARE v_DELETE_STATEMENT STATEMENT;

    SET v_DELETE_QUERY = 'DELETE FROM (SELECT 1 FROM ' || v_TABLE_NAME
                      || ' WHERE ' || v_WHERE_CONDITION
                      || ' FETCH FIRST ' || RTRIM(CHAR(v_COMMIT_COUNT))
                      || ' ROWS ONLY) AS DELETE_TABLE';
    PREPARE v_DELETE_STATEMENT FROM v_DELETE_QUERY;

    DEL_LOOP:
    LOOP
        SET v_COUNTER = v_COUNTER + 1;
        EXECUTE v_DELETE_STATEMENT;
        IF SQLCODE = 100 THEN
            LEAVE DEL_LOOP;
        END IF;
        COMMIT;
    END LOOP;
    COMMIT;
END#
You can add a 'DECLARE CONTINUE HANDLER FOR SQLSTATE XXXX' to your stored procedure, which will skip the erroneous executions and avoid abrupt termination of your stored procedure.
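For the second question about continuing the loop after an error, a general CONTINUE handler in the declaration section of the original procedure expresses the same idea; this is only a minimal sketch, and the error counter name is illustrative rather than taken from the post:

-- Sketch: a CONTINUE handler lets the loop carry on after a failed DELETE
-- instead of terminating the whole procedure.
DECLARE v_error_count INTEGER DEFAULT 0;        -- hypothetical counter of failed deletes
DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
    SET v_error_count = v_error_count + 1;      -- note the failure and keep looping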
