Parsing stdout chunks into arrays in bash or ruby - arrays

I am trying to find the most efficient way to turn stdout log entries from racadm (Dell chassis/iDRAC) into individual arrays or JSON arrays so I can evaluate each entry one at a time. The output always has the same fields; the output below is pretty typical:
$ racadm chassislog view -c Storage -b PDR
SeqNumber = 11700
Message ID = PDR17
Category = Storage
AgentID = CMC
Severity = Information
Timestamp = 2020-03-21 00:02:06
Message Arg 1 = Physical Disk 0:0:15
FQDD = Disk.Bay.15:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1
Message = Global hot spare assigned to Physical Disk 0:0:15.
--------------------------------------------------------------------------------
SeqNumber = 11699
Message ID = PDR26
Category = Storage
AgentID = CMC
Severity = Information
Timestamp = 2020-03-21 00:02:04
Message Arg 1 = Physical Disk 0:0:3
FQDD = Disk.Bay.3:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1
Message = Physical Disk 0:0:3 is online.
--------------------------------------------------------------------------------
SeqNumber = 11696
Message ID = PDR71
Category = Storage
AgentID = CMC
Severity = Information
Timestamp = 2020-03-21 00:02:01
Message Arg 1 = Physical Disk 0:0:15
Message Arg 2 = Physical Disk 0:0:3
FQDD = Disk.Bay.15:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1
Message = Copyback completed from Physical Disk 0:0:15 to Physical Disk 0:0:3.
--------------------------------------------------------------------------------
SeqNumber = 11670
Message ID = PDR70
Category = Storage
AgentID = CMC
Severity = Information
Timestamp = 2020-03-20 21:45:47
Message Arg 1 = Physical Disk 0:0:15
Message Arg 2 = Physical Disk 0:0:3
FQDD = Disk.Bay.15:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1
Message = Copyback started from Physical Disk 0:0:15 to Physical Disk 0:0:3.
--------------------------------------------------------------------------------
SeqNumber = 11667
Message ID = PDR8
Category = Storage
AgentID = CMC
Severity = Information
Timestamp = 2020-03-20 21:45:44
Message Arg 1 = Physical Disk 0:0:3
FQDD = Disk.Bay.3:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1
Message = Physical Disk 0:0:3 is inserted.
--------------------------------------------------------------------------------
I'd really love to read the entire output into an associative array so I could step through
each entry in a for loop for events. Looking for guidance in Ruby (Chef) or bash.

This Perl one-liner converts input like the above into an array of JSON objects, which you can then process in any JSON-aware tool.
racadm chassislog view -c Storage -b PDR | \
perl -MJSON::PP -lne 'if (/([^=]*?)\s*=\s*(.*)/) { $obj{$1} = $2 }
elsif (/^-+$/) { push @records, { %obj }; undef %obj }
END { push @records, { %obj } if %obj;
print encode_json(\@records) }'
Output (after pretty-printing):
[
{
"Timestamp": "2020-03-21 00:02:06",
"Message ID": "PDR17",
"Category": "Storage",
"Message": "Global hot spare assigned to Physical Disk 0:0:15.",
"AgentID": "CMC",
"Severity": "Information",
"SeqNumber": "11700",
"FQDD": "Disk.Bay.15:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1",
"Message Arg 1": "Physical Disk 0:0:15"
},
{
"Category": "Storage",
"Message ID": "PDR26",
"Timestamp": "2020-03-21 00:02:04",
"SeqNumber": "11699",
"Message": "Physical Disk 0:0:3 is online.",
"Severity": "Information",
"AgentID": "CMC",
"Message Arg 1": "Physical Disk 0:0:3",
"FQDD": "Disk.Bay.3:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1"
},
{
"FQDD": "Disk.Bay.15:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1",
"Message Arg 2": "Physical Disk 0:0:3",
"Message Arg 1": "Physical Disk 0:0:15",
"Severity": "Information",
"AgentID": "CMC",
"Message": "Copyback completed from Physical Disk 0:0:15 to Physical Disk 0:0:3.",
"SeqNumber": "11696",
"Timestamp": "2020-03-21 00:02:01",
"Category": "Storage",
"Message ID": "PDR71"
},
{
"Message Arg 1": "Physical Disk 0:0:15",
"FQDD": "Disk.Bay.15:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1",
"Message Arg 2": "Physical Disk 0:0:3",
"SeqNumber": "11670",
"Message": "Copyback started from Physical Disk 0:0:15 to Physical Disk 0:0:3.",
"Severity": "Information",
"AgentID": "CMC",
"Category": "Storage",
"Message ID": "PDR70",
"Timestamp": "2020-03-20 21:45:47"
},
{
"Timestamp": "2020-03-20 21:45:44",
"Message ID": "PDR8",
"Category": "Storage",
"Message": "Physical Disk 0:0:3 is inserted.",
"AgentID": "CMC",
"Severity": "Information",
"SeqNumber": "11667",
"FQDD": "Disk.Bay.3:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1",
"Message Arg 1": "Physical Disk 0:0:3"
}
]
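Since the result is plain JSON, any JSON-aware language can then step through the entries. As a minimal sketch, assuming the one-liner's output has been redirected to a file called entries.json (the filename is only for illustration), iterating over each entry in Python looks like this:
import json

# load the array of log-entry objects produced by the Perl one-liner above
with open('entries.json') as f:
    records = json.load(f)

# each record is a dict keyed by the field names shown in the output
for rec in records:
    print(rec['SeqNumber'], rec['Severity'], rec['Message'])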

Not bash, since the shell is for handling files and launching commands, but GNU awk, which is often falsely perceived as part of the shell, is a simple yet powerful programming language. "Step through each entry in a for loop for events" is not really a requirement, so here is a small sample:
$ gawk -v item="Message Arg 2" ' # queried item as parameter
BEGIN {
RS="\n-+$\n" # record is separated by a bunch of -:s
FS="\n" # a line is a field within a record
}
{
for(nf=1;nf<=NF;nf++) { # loop all lines in a record
split($nf,t,/ *= */) # split lines by = and surrounding space
a[NR][t[1]]=t[2] # hash to a 2 dimensional array indexed by
} # record no. and the item, value as value
}
END { # after lines are hashed, make queries
for(nr in a) # for each record in hash
if(item in a[nr]) # if queried item is found in it
printf "%d: %s = %s\n", nr,item,a[nr][item] # output
}' file
Output for query item Message Arg 2:
3: Message Arg 2 = Physical Disk 0:0:3
4: Message Arg 2 = Physical Disk 0:0:3
Here is an alternate ending for "match a condition I'm looking for in Message and reference the corresponding FQDD":
$ gawk -v item=Message -v cond=started -v output=FQDD '
BEGIN {
RS="\n-+$\n" # record is separated by a bunch of -:s
FS="\n" # a line is a field within a record
}
{
for(nf=1;nf<=NF;nf++) { # loop all lines in a record
split($nf,t,/ *= */) # split lines by = and surrounding space
a[NR][t[1]]=t[2] # hash to a 2 dimensional array indexed by
} # record no. and the item, value as value
}
END {
for(nr in a)
if((item in a[nr]) && a[nr][item]~cond)
printf "%d: %s = %s\n", nr,output,a[nr][output]
}' file
Output now:
4: FQDD = Disk.Bay.15:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1
i.e. if the variable item is found as a key in a[nr] and its value matches cond, print the value of a[nr][output] (here a[nr]["FQDD"]) from the same record.
In SQL that would be SELECT output FROM file WHERE item LIKE '%cond%'

Based on Shawn's one-liner as a pattern, a colleague found a Python 2.7 compatible way to do exactly what we want. The code is below and offers the exact functionality I need.
import re
import sys
from pprint import pprint

regex_string_1 = r'([^=]*?)\s*=\s*(.*)'
regex_string_2 = r'^-+$'
regex1 = re.compile(regex_string_1)
regex2 = re.compile(regex_string_2)

current_entry = {}
entries = []
test = sys.stdin.read()               # the raw racadm output (originally held in a string variable)
lines = test.split('\n')
for line in lines:
    match = regex1.match(line)
    if match:
        key, value = match.groups()   # split on the first '=' only, whitespace already trimmed
        current_entry[key] = value
    elif regex2.match(line):
        entries.append(current_entry)
        current_entry = {}
if current_entry:                     # keep the final entry if the output lacks a trailing separator
    entries.append(current_entry)
pprint(entries)
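Run against the live command, e.g. racadm chassislog view -c Storage -b PDR | python parse_racadm.py (the script name is hypothetical), entries then holds one dict per log entry, so the condition-matching shown in the awk answer can be done the same way here, for example:
# print the FQDD of every entry whose Message mentions "started"
for entry in entries:
    if 'started' in entry.get('Message', ''):
        print(entry.get('FQDD'))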

Related

JQ if else then NULL Return

I'm trying to filter and output from JSON with jq.
The API will sometimes return an object and sometimes an array. I want to catch the result using an if statement and return an empty string when the object/array is not available.
{
"result":
{
"entry": {
"id": "207579",
"title": "Realtek Bluetooth Mesh SDK on Linux\/Android Segmented Packet reference buffer overflow",
"summary": "A vulnerability, which was classified as critical, was found in Realtek Bluetooth Mesh SDK on Linux\/Android (the affected version unknown). This affects an unknown functionality of the component Segmented Packet Handler. There is no information about possible countermeasures known. It may be suggested to replace the affected object with an alternative product.",
"details": {
"affected": "A vulnerability, which was classified as critical, was found in Realtek Bluetooth Mesh SDK on Linux\/Android (the affected version unknown).",
"vulnerability": "The manipulation of the argument reference with an unknown input leads to a unknown weakness. CWE is classifying the issue as CWE-120. The program copies an input buffer to an output buffer without verifying that the size of the input buffer is less than the size of the output buffer, leading to a buffer overflow.",
"impact": "This is going to have an impact on confidentiality, integrity, and availability.",
"countermeasure": "There is no information about possible countermeasures known. It may be suggested to replace the affected object with an alternative product."
},
"timestamp": {
"create": "1661860801",
"change": "1661861110"
},
"changelog": [
"software_argument"
]
},
"software": {
"vendor": "Realtek",
"name": "Bluetooth Mesh SDK",
"platform": [
"Linux",
"Android"
],
"component": "Segmented Packet Handler",
"argument": "reference",
"cpe": [
"cpe:\/a:realtek:bluetooth_mesh_sdk"
],
"cpe23": [
"cpe:2.3:a:realtek:bluetooth_mesh_sdk:*:*:*:*:*:*:*:*"
]
}
}
}
I would also like to use the statement globally for the whole array output so I can parse it to .csv and escape the null, since software name can also contain an array or an object. Having a global if statement will simplify the syntax of the result and suppress the error with ?
The error I received from bash:
jq -r '.result [] | [ "https://vuldb.com/?id." + .entry.id ,.software.vendor // "empty",(.software.name | if type!="array" then [.] | join (",") else . //"empty" end )?,.software.type // "empty",(.software.platform | if type!="array" then [] else . | join (",") //"empty" end )?] | @csv' > tst.csv
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 7452 0 7393 100 59 4892 39 0:00:01 0:00:01 --:--:-- 4935
jq: error (at <stdin>:182): Cannot iterate over null (null)
What I have tried is the following code, which I tried to demo at https://jqplay.org/, and which is incorrect syntax:
.result [] |( if .[] == null then // "empty" else . end
| ,software.name // "empty" ,.software.platform |if type!="array" then [.] // "empty" else . | join (",") end)
Current output
[
[
"Bluetooth Mesh SDK"
],
"Linux,Android"
]
Desired outcome
[
"Bluetooth Mesh SDK",
"empty"
]
After fixing your input JSON, I think you can get the desired output by using the following JQ filter:
if (.result | type) == "array" then . else (.result |= [.]) end
| .result[].software | [ .name, (.platform // [ "Empty" ] | join(",")) ]
Where
if (.result | type) == "array" then . else (.result |= [.]) end
Wraps .result in an array if type isn't array
.result[].software
Loops through the software in each .result object
[ .name, (.platform // [ "Empty" ] | join(",")) ]
Creates an array with .name and .platform (which is replaced by [ "Empty" ] when it doesn't exist). The platform array is then join()'d to a string.
Outcome:
[
"Bluetooth Mesh SDK",
"Linux,Android"
]
Or
[
"Bluetooth Mesh SDK",
"Empty
]
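Since the end goal is a .csv, here is the same normalization written out in plain Python for comparison rather than jq; it is only a sketch based on the JSON shown above (the filename vuldb.json is illustrative), but it mirrors the filter's logic: wrap .result in a list if needed, default a missing platform to "Empty", and join the platform array. It also joins name when it arrives as an array, the case the question mentions:
import csv
import json
import sys

with open('vuldb.json') as f:          # illustrative filename for the API response
    doc = json.load(f)

result = doc['result']
if not isinstance(result, list):       # wrap a single object in a list, like the jq filter does
    result = [result]

writer = csv.writer(sys.stdout)
for item in result:
    software = item.get('software') or {}
    name = software.get('name')
    if isinstance(name, list):         # the question notes name can also be an array
        name = ','.join(name)
    platform = software.get('platform') or ['Empty']
    writer.writerow([name, ','.join(platform)])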

HTTP 400 Error: size is too accurate. Smallest unit is 0.00000001

I am making a call to
authedClient.placeOrder(sellParams)
with params:
sellParams:any = {
'side': 'sell',
'product_id': 'BTC-USD',
'type': 'market',
'size': 0.012613515
}
this throws error:
Error: HTTP 400 Error: size is too accurate. Smallest unit is 0.00000001
at Request._callback (/srv/node_modules/coinbase-pro/lib/clients/public.js:68:15)
at Request.self.callback (/srv/node_modules/request/request.js:185:22)
at emitTwo (events.js:126:13)
I am not sure why it fails. Please advise.
It says that you should only place a size that is in increments of 0.00000001 (the base_increment below). Since your size, 0.012613515, has a precision of 9 decimal places, it is rejected.
Currently, I cannot find the base_increment in the /products endpoint, but it is in the status channel:
// Status Message
{
"type": "status",
"products": [
{
"id": "BTC-USD",
"base_currency": "BTC",
"quote_currency": "USD",
"base_min_size": "0.001",
"base_max_size": "70",
"base_increment": "0.00000001", // Here you go
"quote_increment": "0.01",
"display_name": "BTC/USD",
"status": "online",
"status_message": null,
"min_market_funds": "10",
"max_market_funds": "1000000",
"post_only": false,
"limit_only": false,
"cancel_only": false
}
],
...
}
I can add an update to this: the order float algebra is definitely a little wonky and differs by exchange. For CB/CB Pro you'll want to get your base_increment for sell orders and quote_increment for buy orders, in string format; run a function like this:
def get_increments(ticker, auth_client):
    products = auth_client.get_products()
    for i in range(len(products)):
        if products[i]['id'] == ticker:
            base_incr = products[i]['base_increment']
            quote_incr = products[i]['quote_increment']
            return base_incr, quote_incr
i. Next, you'll want to utilize those increments to round down. I divide by the appropriate increment, use floor division by 1, then multiply by the same increment.
ii. To be safe, I get a decimal count again using string manipulation functions, and run something like round(qty, decimals). This cleans up the occasional trailing string of 9999999 or 00000001.
iii. If you're looking to trade with 100% equity, use the funds argument in buy (quote asset value), and size argument in sell (base asset value).
Buy code looks something like:
decimals = int(str(quote_incr).find('1'))-int(str(quote_incr).find('.'))
quote_incr = float(quote_incr)
qty = (qty/quote_incr//1)*quote_incr
qty = str(round(qty,decimals))
auth_client.place_market_order(product_id=ticker, side='buy', funds=qty)
While sell code looks something like:
decimals = int(str(base_incr).find('1'))-int(str(base_incr).find('.'))
base_incr = float(base_incr)
qty = (qty/base_incr//1)*base_incr
qty = str(round(qty,decimals))
auth_client.place_market_order(product_id=ticker, side='sell', size=qty)
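As an alternative to the float floor-division above, Python's decimal module can do the same round-down more predictably; this is only a sketch (not what the answer uses) and it assumes the increment is a power-of-ten string such as the "0.00000001" base_increment shown earlier:
from decimal import Decimal, ROUND_DOWN

def truncate_to_increment(qty, increment):
    # round qty down to the exchange increment, e.g. '0.00000001'
    # (quantize works here because the increments are powers of ten)
    return str(Decimal(str(qty)).quantize(Decimal(increment), rounding=ROUND_DOWN))

print(truncate_to_increment(0.012613515, '0.00000001'))  # -> 0.01261351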

How to flatten an array in a nested json in aws glue using pyspark?

I am trying to flatten a JSON file to be able to load it into PostgreSQL all in AWS Glue. I am using PySpark. Using a crawler I crawl the S3 JSON and produce a table. I then use an ETL Glue script to:
read the crawled table
use the 'Relationalize' function to flatten the file
convert the dynamic frame to a dataframe
try to 'explode' the request.data field
Script so far:
datasource0 = glueContext.create_dynamic_frame.from_catalog(database = glue_source_database, table_name = glue_source_table, transformation_ctx = "datasource0")
df0 = Relationalize.apply(frame = datasource0, staging_path = glue_temp_storage, name = dfc_root_table_name, transformation_ctx = "dfc")
df1 = df0.select(dfc_root_table_name)
df2 = df1.toDF()
df2 = df1.select(explode(col('`request.data`')).alias("request_data"))
<then i write df1 to a PostgreSQL database which works fine>
Issues I face:
The 'Relationalize' function works well except the request.data field which becomes a bigint and therefore 'explode' doesn't work.
Explode cannot be done without using 'Relationalize' on the JSON first due to the structure of the data. Specifically the error is: "org.apache.spark.sql.AnalysisException: cannot resolve 'explode(request.data)' due to data type mismatch: input to function explode should be array or map type, not bigint"
If I try to make the dynamic frame a dataframe first then I get this issue: "py4j.protocol.Py4JJavaError: An error occurred while calling o72.jdbc.
: java.lang.IllegalArgumentException: Can't get JDBC type for struct..."
I tried to also upload a classifier so that the data would flatten in the crawl itself but AWS confirmed this wouldn't work.
The JSON format of the original file that I am trying to normalise is as follows:
- field1
- field2
- {}
- field3
- {}
- field4
- field5
- []
- {}
- field6
- {}
- field7
- field8
- {}
- field9
- {}
- field10
# Flatten nested df
from pyspark.sql import functions as F

def flatten_df(nested_df):
    # explode array columns first (one row per element, keeping rows where the array is null)
    array_cols = [c[0] for c in nested_df.dtypes if c[1][:5] == 'array']
    for col in array_cols:
        nested_df = nested_df.withColumn(col, F.explode_outer(nested_df[col]))
    nested_cols = [c[0] for c in nested_df.dtypes if c[1][:6] == 'struct']
    if len(nested_cols) == 0:
        return nested_df
    flat_cols = [c[0] for c in nested_df.dtypes if c[1][:6] != 'struct']
    flat_df = nested_df.select(flat_cols +
                               [F.col(nc + '.' + c).alias(nc + '_' + c)
                                for nc in nested_cols
                                for c in nested_df.select(nc + '.*').columns])
    return flatten_df(flat_df)

df = flatten_df(df)
It will replace all dots with underscores. Note that it uses explode_outer and not explode, to include null values in case the array itself is null. This function is available in spark v2.4+ only.
Also remember that exploding an array adds more rows (with duplicated values) and flattening a struct adds more columns; in short, your original df will explode both horizontally and vertically, which may slow down processing the data later.
Therefore my recommendation would be to identify the feature-related data and store only that data in PostgreSQL, keeping the original JSON files in S3.
Once you have relationalized the json column, you don't need to explode it. Relationalize transforms the nested JSON into key-value pairs at the outermost level of the JSON document. The transformed data maintains a list of the original keys from the nested JSON, separated by periods.
Example :
Nested json :
{
"player": {
"username": "user1",
"characteristics": {
"race": "Human",
"class": "Warlock",
"subclass": "Dawnblade",
"power": 300,
"playercountry": "USA"
},
"arsenal": {
"kinetic": {
"name": "Sweet Business",
"type": "Auto Rifle",
"power": 300,
"element": "Kinetic"
},
"energy": {
"name": "MIDA Mini-Tool",
"type": "Submachine Gun",
"power": 300,
"element": "Solar"
},
"power": {
"name": "Play of the Game",
"type": "Grenade Launcher",
"power": 300,
"element": "Arc"
}
},
"armor": {
"head": "Eye of Another World",
"arms": "Philomath Gloves",
"chest": "Philomath Robes",
"leg": "Philomath Boots",
"classitem": "Philomath Bond"
},
"location": {
"map": "Titan",
"waypoint": "The Rig"
}
}
}
Flattened out json after relationalize:
{
"player.username": "user1",
"player.characteristics.race": "Human",
"player.characteristics.class": "Warlock",
"player.characteristics.subclass": "Dawnblade",
"player.characteristics.power": 300,
"player.characteristics.playercountry": "USA",
"player.arsenal.kinetic.name": "Sweet Business",
"player.arsenal.kinetic.type": "Auto Rifle",
"player.arsenal.kinetic.power": 300,
"player.arsenal.kinetic.element": "Kinetic",
"player.arsenal.energy.name": "MIDA Mini-Tool",
"player.arsenal.energy.type": "Submachine Gun",
"player.arsenal.energy.power": 300,
"player.arsenal.energy.element": "Solar",
"player.arsenal.power.name": "Play of the Game",
"player.arsenal.power.type": "Grenade Launcher",
"player.arsenal.power.power": 300,
"player.arsenal.power.element": "Arc",
"player.armor.head": "Eye of Another World",
"player.armor.arms": "Philomath Gloves",
"player.armor.chest": "Philomath Robes",
"player.armor.leg": "Philomath Boots",
"player.armor.classitem": "Philomath Bond",
"player.location.map": "Titan",
"player.location.waypoint": "The Rig"
}
Thus in your case, request.data is already a new column flattened out from request column and its type is interpreted as bigint by spark.
Reference : Simplify querying nested JSON with the AWS Glue Relationalize transform
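If you want to see this on your own data before deciding whether anything still needs exploding, relationalize, pick the root frame, and print its schema; a short sketch reusing the names from the question's script (datasource0, glue_temp_storage, dfc_root_table_name):
from awsglue.transforms import Relationalize

# Relationalize returns a DynamicFrameCollection; the root frame holds the flattened columns
dfc = Relationalize.apply(frame=datasource0, staging_path=glue_temp_storage,
                          name=dfc_root_table_name, transformation_ctx="relationalize")
root_df = dfc.select(dfc_root_table_name).toDF()
root_df.printSchema()                      # 'request.data' appears as an already-flattened column

# dotted column names need backticks when selected
root_df.select('`request.data`').show(5)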

Attribute Syntax for JSON query in check_json.pl

So, I'm trying to set up check_json.pl in NagiosXI to monitor some statistics. https://github.com/c-kr/check_json
I'm using the code with the modification I submitted in pull request #32, so line numbers reflect that code.
The json query returns something like this:
[
{
"total_bytes": 123456,
"customer_name": "customer1",
"customer_id": "1",
"indices": [
{
"total_bytes": 12345,
"index": "filename1"
},
{
"total_bytes": 45678,
"index": "filename2"
},
],
"total": "765.43gb"
},
{
"total_bytes": 123456,
"customer_name": "customer2",
"customer_id": "2",
"indices": [
{
"total_bytes": 12345,
"index": "filename1"
},
{
"total_bytes": 45678,
"index": "filename2"
},
],
"total": "765.43gb"
}
]
I'm trying to monitor the size of specific files, so a check should look something like:
/path/to/check_json.pl -u https://path/to/my/json -a "SOMETHING" -p "SOMETHING"
...where I'm trying to figure out the SOMETHINGs so that I can monitor the total_bytes of filename1 in customer2 where I know the customer_id and index but not their position in the respective arrays.
I can monitor customer1's total bytes by using the string "[0]->{'total_bytes'}", but I need to be able to specify which customer and dig deeper into the file name (known) and file size (the stat to monitor), AND the working query only gives me the status (OK, WARNING, or CRITICAL). Adding -p, all I get are errors.
The error with -p no matter how I've been able to phrase it is always:
Not a HASH reference at ./check_json.pl line 235.
Even when I can get a valid OK from the example "[0]->{'total_bytes'}", using that in -p still gives the same error.
Links pointing to documentation on the format to use would be very helpful. Examples in the README for the script or in the -h output are failing me here. Any ideas?
I really have no idea what your question is. I'm sure I'm not alone, hence the downvotes.
Once you have the decoded json, if you have a customer_id to search for, you can do:
my ($customer_info) = grep {$_->{customer_id} eq $customer_id} @$json_response;
Regarding the error on line 235, this looks odd:
foreach my $key ($np->opts->perfvars eq '*' ? map { "{$_}"} sort keys %$json_response : split(',', $np->opts->perfvars)) {
# ....................................... ^^^^^^^^^^^^^
$perf_value = $json_response->{$key};
If perfvars eq "*", you appear to be looking for $json_response->{"{total}"}, for example. You might want to validate the user's input:
die "no such key in json data: '$key'\n" unless exists $json_response->{$key};
This entire business of stringifying the hash ref lookups just smells bad.
A better question would look like:
I have this JSON data. How do I get the sum of total_bytes for the customer with id 1?
See https://stackoverflow.com/help/mcve
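For comparison, the grep-style lookup and sum can be sketched in a few lines of Python over the JSON array shown above (response.json is just an illustrative filename):
import json

with open('response.json') as f:
    customers = json.load(f)

# find the customer by id, then sum the per-index byte counts
customer = next(c for c in customers if c['customer_id'] == '1')
total_bytes = sum(idx['total_bytes'] for idx in customer['indices'])
print(customer['customer_name'], total_bytes)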

Creating SQL server RDS instance using Terraform

I'm going to create a SQL server database in RDS using Terraform. My Terraform file looks like this:
### RDS ###
# Subnet Group
resource "aws_db_subnet_group" "private" {
name = "db_arcgis-${var.env_name}-dbsubnet"
description = "Subnet Group for Arcgis ${var.env_tag}} DB"
subnet_ids = ["${aws_subnet.public1.id}", "${aws_subnet.public2.id}"]
tags {
Env = "${var.env_tag}"
}
}
# RDS DB parameter group
# Must enable triggers to allow Multi-AZ
resource "aws_db_parameter_group" "allow_triggers" {
name = "arcgis-${var.env_name}-allow-triggers"
family = "sqlserver-se-12.0"
description = "Parameter Group for Arcgis ${var.env_tag} to allow triggers"
parameter {
name = "log_bin_trust_function_creators"
value = "1"
}
tags {
Env = "${var.env_tag}"
}
}
# RDS
resource "aws_db_instance" "main" {
allocated_storage = "${var.db_size}"
engine = "${var.db_engine}"
engine_version = "${var.db_version}"
instance_class = "${var.db_instance}"
identifier = "arcgis-${var.env_name}-db"
name = "${var.db_name}"
username = "${var.db_username}"
password = "${var.db_password}"
db_subnet_group_name = "${aws_db_subnet_group.private.id}"
parameter_group_name = "${aws_db_parameter_group.allow_triggers.id}"
multi_az = "${var.db_multiaz}"
vpc_security_group_ids = ["${aws_security_group.private_rds.id}"]
#availability_zone = "${var.vpc_az1}"
publicly_accessible = "true"
backup_retention_period = "2"
apply_immediately = "true"
tags {
Env = "${var.env_tag}"
}
}
I get this error when applying the Terraform files:
Error applying plan:
1 error(s) occurred:
* aws_db_parameter_group.allow_triggers: Error modifying DB Parameter Group: InvalidParameterValue: Could not find parameter with name: log_bin_trust_function_creators
status code: 400, request id: d298ab14-8b94-11e6-a088-31e21873c378
The obvious issue here is that log_bin_trust_function_creators isn't an available parameter for the sqlserver-se-12.0 parameter group family, as you can see when listing all the parameters in a parameter group based on sqlserver-se-12.0:
$ aws rds describe-db-parameters --db-parameter-group-name test-sqlserver-se-12-0 --query 'Parameters[*].ParameterName'
[
"1204",
"1211",
"1222",
"1224",
"2528",
"3205",
"3226",
"3625",
"4199",
"4616",
"6527",
"7806",
"access check cache bucket count",
"access check cache quota",
"ad hoc distributed queries",
"affinity i/o mask",
"affinity mask",
"agent xps",
"allow updates",
"backup compression default",
"blocked process threshold (s)",
"c2 audit mode",
"clr enabled",
"contained database authentication",
"cost threshold for parallelism",
"cross db ownership chaining",
"cursor threshold",
"database mail xps",
"default full-text language",
"default language",
"default trace enabled",
"disallow results from triggers",
"filestream access level",
"fill factor (%)",
"ft crawl bandwidth (max)",
"ft crawl bandwidth (min)",
"ft notify bandwidth (max)",
"ft notify bandwidth (min)",
"in-doubt xact resolution",
"index create memory (kb)",
"lightweight pooling",
"locks",
"max degree of parallelism",
"max full-text crawl range",
"max server memory (mb)",
"max text repl size (b)",
"max worker threads",
"media retention",
"min memory per query (kb)",
"min server memory (mb)",
"nested triggers",
"network packet size (b)",
"ole automation procedures",
"open objects",
"optimize for ad hoc workloads",
"ph timeout (s)",
"priority boost",
"query governor cost limit",
"query wait (s)",
"recovery interval (min)",
"remote access",
"remote admin connections",
"remote login timeout (s)",
"remote proc trans",
"remote query timeout (s)",
"replication xps",
"scan for startup procs",
"server trigger recursion",
"set working set size",
"show advanced options",
"smo and dmo xps",
"transform noise words",
"two digit year cutoff",
"user connections",
"user options",
"xp_cmdshell"
]
Instead, that parameter is only available in the MySQL flavours:
$ aws rds describe-db-parameters --db-parameter-group-name default.mysql5.6 --query 'Parameters[*].ParameterName'
[
"allow-suspicious-udfs",
"auto_increment_increment",
"auto_increment_offset",
"autocommit",
"automatic_sp_privileges",
"back_log",
"basedir",
"binlog_cache_size",
"binlog_checksum",
"binlog_error_action",
"binlog_format",
"binlog_max_flush_queue_time",
"binlog_order_commits",
"binlog_row_image",
"binlog_rows_query_log_events",
"binlog_stmt_cache_size",
"binlogging_impossible_mode",
"bulk_insert_buffer_size",
"character-set-client-handshake",
"character_set_client",
"character_set_connection",
"character_set_database",
"character_set_filesystem",
"character_set_results",
"character_set_server",
"collation_connection",
"collation_server",
"completion_type",
"concurrent_insert",
"connect_timeout",
"core-file",
"datadir",
"default_storage_engine",
"default_time_zone",
"default_tmp_storage_engine",
"default_week_format",
"delay_key_write",
"delayed_insert_limit",
"delayed_insert_timeout",
"delayed_queue_size",
"div_precision_increment",
"end_markers_in_json",
"enforce_gtid_consistency",
"eq_range_index_dive_limit",
"event_scheduler",
"explicit_defaults_for_timestamp",
"flush",
"flush_time",
"ft_boolean_syntax",
"ft_max_word_len",
"ft_min_word_len",
"ft_query_expansion_limit",
"ft_stopword_file",
"general_log",
"general_log_file",
"group_concat_max_len",
"gtid-mode",
"host_cache_size",
"init_connect",
"innodb_adaptive_flushing",
"innodb_adaptive_flushing_lwm",
"innodb_adaptive_hash_index",
"innodb_adaptive_max_sleep_delay",
"innodb_autoextend_increment",
"innodb_autoinc_lock_mode",
"innodb_buffer_pool_dump_at_shutdown",
"innodb_buffer_pool_dump_now",
"innodb_buffer_pool_filename",
"innodb_buffer_pool_instances",
"innodb_buffer_pool_load_abort",
"innodb_buffer_pool_load_at_startup",
"innodb_buffer_pool_load_now",
"innodb_buffer_pool_size",
"innodb_change_buffer_max_size",
"innodb_change_buffering",
"innodb_checksum_algorithm",
"innodb_cmp_per_index_enabled",
"innodb_commit_concurrency",
"innodb_compression_failure_threshold_pct",
"innodb_compression_level",
"innodb_compression_pad_pct_max",
"innodb_concurrency_tickets",
"innodb_data_home_dir",
"innodb_fast_shutdown",
"innodb_file_format",
"innodb_file_per_table",
"innodb_flush_log_at_timeout",
"innodb_flush_log_at_trx_commit",
"innodb_flush_method",
"innodb_flush_neighbors",
"innodb_flushing_avg_loops",
"innodb_force_load_corrupted",
"innodb_ft_aux_table",
"innodb_ft_cache_size",
"innodb_ft_enable_stopword",
"innodb_ft_max_token_size",
"innodb_ft_min_token_size",
"innodb_ft_num_word_optimize",
"innodb_ft_result_cache_limit",
"innodb_ft_server_stopword_table",
"innodb_ft_sort_pll_degree",
"innodb_ft_user_stopword_table",
"innodb_io_capacity",
"innodb_io_capacity_max",
"innodb_large_prefix",
"innodb_lock_wait_timeout",
"innodb_log_buffer_size",
"innodb_log_compressed_pages",
"innodb_log_file_size",
"innodb_log_group_home_dir",
"innodb_lru_scan_depth",
"innodb_max_dirty_pages_pct",
"innodb_max_purge_lag",
"innodb_max_purge_lag_delay",
"innodb_monitor_disable",
"innodb_monitor_enable",
"innodb_monitor_reset",
"innodb_monitor_reset_all",
"innodb_old_blocks_pct",
"innodb_old_blocks_time",
"innodb_online_alter_log_max_size",
"innodb_open_files",
"innodb_optimize_fulltext_only",
"innodb_page_size",
"innodb_print_all_deadlocks",
"innodb_purge_batch_size",
"innodb_purge_threads",
"innodb_random_read_ahead",
"innodb_read_ahead_threshold",
"innodb_read_io_threads",
"innodb_read_only",
"innodb_replication_delay",
"innodb_rollback_on_timeout",
"innodb_rollback_segments",
"innodb_sort_buffer_size",
"innodb_spin_wait_delay",
"innodb_stats_auto_recalc",
"innodb_stats_method",
"innodb_stats_on_metadata",
"innodb_stats_persistent",
"innodb_stats_persistent_sample_pages",
"innodb_stats_transient_sample_pages",
"innodb_strict_mode",
"innodb_support_xa",
"innodb_sync_array_size",
"innodb_sync_spin_loops",
"innodb_table_locks",
"innodb_thread_concurrency",
"innodb_thread_sleep_delay",
"innodb_undo_directory",
"innodb_undo_logs",
"innodb_undo_tablespaces",
"innodb_use_native_aio",
"innodb_write_io_threads",
"interactive_timeout",
"join_buffer_size",
"keep_files_on_create",
"key_buffer_size",
"key_cache_age_threshold",
"key_cache_block_size",
"key_cache_division_limit",
"lc_time_names",
"local_infile",
"lock_wait_timeout",
"log-bin",
"log_bin_trust_function_creators",
"log_bin_use_v1_row_events",
"log_error",
"log_output",
"log_queries_not_using_indexes",
"log_slave_updates",
"log_slow_admin_statements",
"log_slow_slave_statements",
"log_throttle_queries_not_using_indexes",
"log_warnings",
"long_query_time",
"low_priority_updates",
"lower_case_table_names",
"master-info-repository",
"master_verify_checksum",
"max_allowed_packet",
"max_binlog_cache_size",
"max_binlog_size",
"max_binlog_stmt_cache_size",
"max_connect_errors",
"max_connections",
"max_delayed_threads",
"max_error_count",
"max_heap_table_size",
"max_insert_delayed_threads",
"max_join_size",
"max_length_for_sort_data",
"max_prepared_stmt_count",
"max_seeks_for_key",
"max_sort_length",
"max_sp_recursion_depth",
"max_tmp_tables",
"max_user_connections",
"max_write_lock_count",
"metadata_locks_cache_size",
"min_examined_row_limit",
"myisam_data_pointer_size",
"myisam_max_sort_file_size",
"myisam_mmap_size",
"myisam_sort_buffer_size",
"myisam_stats_method",
"myisam_use_mmap",
"net_buffer_length",
"net_read_timeout",
"net_retry_count",
"net_write_timeout",
"old-style-user-limits",
"old_passwords",
"optimizer_prune_level",
"optimizer_search_depth",
"optimizer_switch",
"optimizer_trace",
"optimizer_trace_features",
"optimizer_trace_limit",
"optimizer_trace_max_mem_size",
"optimizer_trace_offset",
"performance_schema",
"performance_schema_accounts_size",
"performance_schema_digests_size",
"performance_schema_events_stages_history_long_size",
"performance_schema_events_stages_history_size",
"performance_schema_events_statements_history_long_size",
"performance_schema_events_statements_history_size",
"performance_schema_events_waits_history_long_size",
"performance_schema_events_waits_history_size",
"performance_schema_hosts_size",
"performance_schema_max_cond_classes",
"performance_schema_max_cond_instances",
"performance_schema_max_file_classes",
"performance_schema_max_file_handles",
"performance_schema_max_file_instances",
"performance_schema_max_mutex_classes",
"performance_schema_max_mutex_instances",
"performance_schema_max_rwlock_classes",
"performance_schema_max_rwlock_instances",
"performance_schema_max_socket_classes",
"performance_schema_max_socket_instances",
"performance_schema_max_stage_classes",
"performance_schema_max_statement_classes",
"performance_schema_max_table_handles",
"performance_schema_max_table_instances",
"performance_schema_max_thread_classes",
"performance_schema_max_thread_instances",
"performance_schema_session_connect_attrs_size",
"performance_schema_setup_actors_size",
"performance_schema_setup_objects_size",
"performance_schema_users_size",
"pid_file",
"plugin_dir",
"port",
"preload_buffer_size",
"profiling_history_size",
"query_alloc_block_size",
"query_cache_limit",
"query_cache_min_res_unit",
"query_cache_size",
"query_cache_type",
"query_cache_wlock_invalidate",
"query_prealloc_size",
"range_alloc_block_size",
"read_buffer_size",
"read_only",
"read_rnd_buffer_size",
"relay-log",
"relay_log_info_repository",
"relay_log_recovery",
"safe-user-create",
"secure_auth",
"secure_file_priv",
"server_id",
"simplified_binlog_gtid_recovery",
"skip-character-set-client-handshake",
"skip-slave-start",
"skip_external_locking",
"skip_name_resolve",
"skip_show_database",
"slave_checkpoint_group",
"slave_checkpoint_period",
"slave_parallel_workers",
"slave_pending_jobs_size_max",
"slave_sql_verify_checksum",
"slave_type_conversions",
"slow_launch_time",
"slow_query_log",
"slow_query_log_file",
"socket",
"sort_buffer_size",
"sql_mode",
"sql_select_limit",
"stored_program_cache",
"sync_binlog",
"sync_frm",
"sync_master_info",
"sync_relay_log",
"sync_relay_log_info",
"sysdate-is-now",
"table_definition_cache",
"table_open_cache",
"table_open_cache_instances",
"temp-pool",
"thread_cache_size",
"thread_stack",
"time_zone",
"timed_mutexes",
"tmp_table_size",
"tmpdir",
"transaction_alloc_block_size",
"transaction_prealloc_size",
"tx_isolation",
"updatable_views_with_limit",
"validate-password",
"validate_password_dictionary_file",
"validate_password_length",
"validate_password_mixed_case_count",
"validate_password_number_count",
"validate_password_policy",
"validate_password_special_char_count",
"wait_timeout"
]
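If you want to check programmatically whether a parameter exists for a given parameter group before adding it to Terraform, here is a small boto3 sketch of the same DescribeDBParameters call (the group names are the ones used in the examples above):
import boto3

rds = boto3.client('rds')

def parameter_exists(group_name, parameter_name):
    # page through DescribeDBParameters and look for the parameter by name
    paginator = rds.get_paginator('describe_db_parameters')
    for page in paginator.paginate(DBParameterGroupName=group_name):
        if any(p['ParameterName'] == parameter_name for p in page['Parameters']):
            return True
    return False

print(parameter_exists('test-sqlserver-se-12-0', 'log_bin_trust_function_creators'))  # False
print(parameter_exists('default.mysql5.6', 'log_bin_trust_function_creators'))        # True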
