I am trying to create a database using Terraform and this seems very complicated for a simple query...
Could you help me, please?
I have tried null_resource with local-exec and a data "external" block calling Python...
I think I am looking at this the wrong way.
Example which doesn't work in Terraform 0.12:
resource "null_resource" "create-endpoint" {
provisioner "local-exec" {
query = <<EOF
{
CREATE EXTERNAL TABLE `dashboard_loading_time`(
`timestamp_iso` string,
`app_identification` struct<service:string,app_name:string,app_type:string,stage:string>,
`user` struct<api_gateway_key:struct<id:string,name:string>,mashery_key:struct<id:string,name:string>,employee:struct<id:string,name:string>>,
`action` struct<action_type:string,path:string>,
`result` struct<status:string,http_status:string,response:struct<response:string>>)
PARTITIONED BY (
`year` int)
ROW FORMAT SERDE
'org.openx.data.jsonserde.JsonSerDe'
STORED AS INPUTFORMAT
'org.apache.hadoop.mapred.TextInputFormat'
OUTPUTFORMAT
'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
LOCATION
's3://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/dev'
}
EOF
command = "aws athena start-query-execution --query-string "query""
}
}
I would like to find the simplest way to do this using terraform.
If you want to use Athena, you need to create the Glue resources.
Try the code below with Terraform.
variable "service_name" {
default = "demo-service"
}
variable "workspace" {
default = "dev"
}
variable "columns" {
default = {
id = "int"
type = "string"
status = "int"
created_at = "timestamp"
}
}
resource "aws_glue_catalog_database" "athena" {
name = "${var.service_name}_db"
}
resource "aws_glue_catalog_table" "athena" {
name = "${var.service_name}_logs"
database_name = "${aws_glue_catalog_database.athena.name}"
table_type = "EXTERNAL_TABLE"
parameters = {
EXTERNAL = "TRUE"
}
storage_descriptor {
location = "s3://${var.service_name}-${var.workspace}-data-pipeline/log/"
input_format = "org.apache.hadoop.mapred.TextInputFormat"
output_format = "org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat"
ser_de_info {
name = "jsonserde"
serialization_library = "org.openx.data.jsonserde.JsonSerDe"
parameters = {
"serialization.format" = "1"
}
}
dynamic "columns" {
for_each = "${var.columns}"
content {
name = "${columns.key}"
type = "${columns.value}"
}
}
}
partition_keys {
name = "year"
type = "string"
}
partition_keys {
name = "month"
type = "string"
}
partition_keys {
name = "day"
type = "string"
}
partition_keys {
name = "hour"
type = "string"
}
}
Refer to this repository: aws-serverless-data-pipeline-by-terraform
resource "aws_glue_catalog_table" "aws_glue_catalog_table" {
name = "mytable"
database_name = aws_glue_catalog_database.aws_glue_catalog_database.name
table_type = "EXTERNAL_TABLE"
parameters = {
"classification" = "json"
}
storage_descriptor {
location = "s3://mybucket/myprefix"
input_format = "org.apache.hadoop.mapred.TextInputFormat"
output_format = "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"
ser_de_info {
name = "myserdeinfo"
serialization_library = "org.openx.data.jsonserde.JsonSerDe"
parameters = {
"paths" = "jsonrootname"
}
}
columns {
name = "column1"
type = "array<struct<resourcearn:string,tags:array<struct<key:string,value:string>>>>"
}
}
partition_keys {
name = "part1"
type = "string"
}
partition_keys {
name = "part2"
type = "string"
}
}
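Note that because both tables above are declared with partition keys, Athena will return no rows until partitions are actually registered (for example with MSCK REPAIR TABLE or ALTER TABLE ... ADD PARTITION). If you want to keep that in Terraform as well, a partition can be declared with aws_glue_partition; the S3 path and partition values below are placeholders, so treat this as a sketch only.
resource "aws_glue_partition" "example" {
  database_name    = aws_glue_catalog_database.aws_glue_catalog_database.name
  table_name       = aws_glue_catalog_table.aws_glue_catalog_table.name
  # values map positionally to the table's partition_keys (part1, part2)
  partition_values = ["2022", "01"]
  storage_descriptor {
    # placeholder prefix; point this at the S3 path holding that partition's data
    location      = "s3://mybucket/myprefix/part1=2022/part2=01/"
    input_format  = "org.apache.hadoop.mapred.TextInputFormat"
    output_format = "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"
    ser_de_info {
      serialization_library = "org.openx.data.jsonserde.JsonSerDe"
    }
  }
}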
Related
I need to be able to search by the News Tags property.
This TypoScript is working fine, but when I enter a tag (the exact same string) from a news record into Solr's search box I don't get any results.
According to "lorenz" in this answer I needed to add my dynamic fields (I believe) to "plugin.tx_solr.query.fields", but the sparse documentation doesn't help me.
I added the following to that TypoScript, but when I use the search box to look for exactly the name of my tag I get no results.
(plugin.tx_solr.query.fields = tags_stringM)
Does anybody have any code that I can use as an example?
plugin.tx_solr{
search {
initializeWithEmptyQuery = 1
showResultsOfInitialEmptyQuery = 1
showEmptyFacets = 1
faceting = 1
faceting {
facets {
mediatype {
label = Facet dos
field = categorytitle_stringS
}
}
}
}
index.queue {
custom_news = 1
custom_news {
table = tx_news_domain_model_news
fields {
abstract = teaser
author = author
authorEmail_stringS = author_email
title = title
datetime_stringS = TEXT
datetime_stringS {
field = datetime
date = d.m.Y H:i
}
titlehr_stringS = TEXT
titlehr_stringS {
field = title_hr
}
teaser_stringS = TEXT
teaser_stringS {
field = teaser
}
datetime_dateS = TEXT
datetime_dateS {
field = datetime
date = Y-m-d\TH:i:s\Z
}
content = SOLR_CONTENT
content {
cObject = COA
cObject {
10 = TEXT
10 {
field = bodytext
noTrimWrap = || |
}
20 = TEXT
20 {
field = tags_stringM
noTrimWrap = || |
}
}
}
categorytitle_stringS = SOLR_RELATION
categorytitle_stringS {
localField = categories
multiValue = 1
}
categoryuid_stringS = SOLR_RELATION
categoryuid_stringS {
localField = categories
foreignLabelField = uid
multiValue = 1
}
keywords = SOLR_MULTIVALUE
keywords {
field = keywords
}
tags_stringM = SOLR_RELATION
tags_stringM {
localField = tags
label = Tags
multiValue = 1
}
tagshr_stringM = SOLR_RELATION
tagshr_stringM {
localField = tags
label = Tags hr
foreignLabelField = title_hr
multiValue = 1
}
mediatypehr_intS = TEXT
mediatypehr_intS {
field = media_type_hr
}
mediatypede_intS = TEXT
mediatypede_intS {
field = media_type_de
}
image_stringS = FILES
image_stringS {
references {
table = tx_news_domain_model_news
uid.field = uid
fieldName = fal_media
}
begin = 0
maxItems = 1
renderObj = IMG_RESOURCE
renderObj {
file.import.data = file:current:publicUrl
file.maxW = 330
#wrap = |
}
}
url = CASE
url {
key.field = type
# Internal
1 = TEXT
1 {
if.isTrue.field = internalurl
typolink.parameter.field = internalurl
typolink.useCacheHash = 1
typolink.returnLast = url
}
# External
2 = TEXT
2 {
if.isTrue.field = externalurl
field = externalurl
}
default = TEXT
default {
typolink.parameter = {$plugin.tx_news.settings.detailPid}
typolink.additionalParams = &tx_news_pi1[controller]=News&tx_news_pi1[action]=detail&tx_news_pi1[news]={field:uid}&L={field:__solr_index_language}
typolink.additionalParams.insertData = 1
typolink.useCacheHash = 1
typolink.returnLast = url
}
}
}
attachments = 1
attachments {
fields = fal_related_files
fileExtensions = *
}
}
}
}
plugin.tx_solr.logging.indexing.queue.news = 1
Aside from defining the fields for indexing, you also need to declare these fields as relevant for search (and give them a weight for how important they are when searching).
One possible solution could look like this piece of TypoScript:
plugin {
tx_solr {
search {
query {
queryFields := addToList(categorytitle_stringS^2.0, keywords^2.0, tags_stringM^2.0, tagshr_stringM^2.0)
}
}
}
}
see manual
I am trying to access all the values from a for_each statement in a google_compute_instance resource.
I want to get all the values [dev-1, dev-2] from the name attribute and pass them as vm_name to my metadata_startup_script.
resource "google_compute_instance" "compute_instance" {
project = var.project_id
for_each = toset(["1", "2"])
name = "dev-${each.key}"
machine_type = "e2-micro"
zone = "${var.region}-b"
boot_disk {
initialize_params {
image = "ubuntu-os-cloud/ubuntu-1804-lts"
}
}
network_interface {
network = "default"
access_config {
}
}
lifecycle {
ignore_changes = [attached_disk]
}
metadata_startup_script = templatefile("copy.tftpl", {
vm_name = "${google_compute_instance.compute_instance.0.name}"
nfs_ip = "${google_filestore_instance.instance.networks[0].ip_addresses[0]}"
file_share_name = "${google_filestore_instance.instance.file_shares[0].name}"
zone = "${var.region}-b"
})
}
I am unable to get all the compute instance names from the name argument.
I get this error message:
╷
│ Error: Cycle: google_compute_instance.compute_instance["2"], google_compute_instance.compute_instance["1"]
│
│
How do I resolve this issue so I can get all the virtual machine names and pass them to the vm_name variable?
I would change the hardcoded elements in your for_each to a variable,
and pass that to your vm_name, something like this:
locals {
compute_names = ["dev-1", "dev-2"]
}
resource "google_compute_instance" "compute_instance" {
project = var.project_id
for_each = toset(local.compute_names)
name = each.key
machine_type = "e2-micro"
zone = "${var.region}-b"
...
metadata_startup_script = templatefile("copy.tftpl", {
vm_name = local.compute_names
...
zone = "${var.region}-b"
})
}
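With this change vm_name is the whole list of instance names rather than a single string, so copy.tftpl has to loop over it using Terraform's template directives. A minimal sketch of what that could look like (the actual contents of copy.tftpl aren't shown in the question, so the echo and mount lines are only placeholders):
#!/bin/bash
# copy.tftpl -- hypothetical contents
%{ for name in vm_name ~}
echo "peer vm: ${name}" >> /etc/vm_names
%{ endfor ~}
# the other variables passed to templatefile() are used as before
mount -t nfs ${nfs_ip}:/${file_share_name} /mnt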
I use the following TypoScript from the documentation (apache-solr-for-typo3) for a simple facet:
plugin.tx_solr.search {
faceting = 1
faceting {
facets {
contentType {
label = Content Type
field = type
}
}
}
}
The result is e.g.
sys_file_metadata (691)
tt_address (341)
tx_news_domain_model_news (180)
pages (153)
tx_events2_domain_model_event (2)
How can I rename e.g. tt_address to Personen and the other titles for better readability?
Here is my configuration for tt_address
plugin.tx_solr.index.queue {
tt_address = 1
tt_address {
table = tt_address
lable = Personen
additionalWhereClause = first_name NOT LIKE ''
fields {
abstract = position
description = description
title = name
personen_stringS = Personen
content = SOLR_CONTENT
content {
field = title
}
url = TEXT
url {
typolink.parameter.field = detail
typolink.returnLast = url
typolink.useCacheHash = 0
}
}
}
}
TYPO3 10.4.18, solr 11.0.4
It's not in the indexing queue where you set the label; it's in the faceting configuration:
plugin {
tx_solr {
search {
faceting = 1
faceting {
facets {
# contentType {
# label = Content Type
# field = type
# }
contentType {
label = Filter
field = type
renderingInstruction = CASE
renderingInstruction {
key.field = optionValue
pages = TEXT
pages.value = Internetseiten
tx_solr_file = TEXT
tx_solr_file.value = Dateien
tx_news_domain_model_news = TEXT
tx_news_domain_model_news.value = Nachrichten
tt_address = TEXT
tt_address.value = Adressen
}
}
        }
      }
    }
  }
}
I am trying to create a terraform script that will register an application in Azure AD.
I have been successful in generating a script that only reads scopes from Microsoft Graph, but I am having trouble figuring out what the equivalent of those scopes is in Business Central (cloud version).
For Microsoft Graph I have these permissions:
email
offline_access
openid
profile
Financials.ReadWrite.All
User.Read
And I read them like this in terraform:
provider "azuread" {
# Whilst version is optional, we /strongly recommend/ using it to pin the version of the Provider being used
version = "~> 0.10"
subscription_id = var.subscription_id
}
data "azuread_service_principal" "graph-api" {
display_name = "Microsoft Graph"
}
locals {
MAIL_PERMISSION = "${matchkeys(data.azuread_service_principal.graph-api.oauth2_permissions.*.id, data.azuread_service_principal.graph-api.oauth2_permissions.*.value, list("email"))[0]}"
USER_READ_PERMISSION = "${matchkeys(data.azuread_service_principal.graph-api.oauth2_permissions.*.id, data.azuread_service_principal.graph-api.oauth2_permissions.*.value, list("User.Read"))[0]}"
FINANCIALS_READ_WRITE_PERMISSION = "${matchkeys(data.azuread_service_principal.graph-api.oauth2_permissions.*.id, data.azuread_service_principal.graph-api.oauth2_permissions.*.value, list("Financials.ReadWrite.All"))[0]}"
OFFLINE_PERMISSION = "${matchkeys(data.azuread_service_principal.graph-api.oauth2_permissions.*.id, data.azuread_service_principal.graph-api.oauth2_permissions.*.value, list("offline_access"))[0]}"
OPENID_PERMISSION = "${matchkeys(data.azuread_service_principal.graph-api.oauth2_permissions.*.id, data.azuread_service_principal.graph-api.oauth2_permissions.*.value, list("openid"))[0]}"
PROFILE_PERMISSION = "${matchkeys(data.azuread_service_principal.graph-api.oauth2_permissions.*.id, data.azuread_service_principal.graph-api.oauth2_permissions.*.value, list("profile"))[0]}"
}
This seems to be working fine. I am just struggling to find a similar way of doing this for Dynamics 365 Business Central.
I am interested in these:
app_access
Financials.ReadWrite.All
user_impersonation
Does anybody know what that endpoint might look like? The documentation is very limited.
EDIT:
This is the final script for anybody interested in setting up a Business Central application registration:
variable "subscription_id" {
type = string
}
variable "app_name" {
type = string
}
variable "app_homepage" {
type = string
}
variable "app_reply_urls" {
type = list(string)
}
provider "azuread" {
# Whilst version is optional, we /strongly recommend/ using it to pin the version of the Provider being used
version = "~> 0.10"
subscription_id = var.subscription_id
}
data "azuread_service_principal" "graph-api" {
display_name = "Microsoft Graph"
}
data "azuread_service_principal" "d365bc" {
display_name = "Dynamics 365 Business Central"
}
locals {
APP_ACCESS_PERMISSION = "${matchkeys(data.azuread_service_principal.d365bc.app_roles.*.id, data.azuread_service_principal.d365bc.app_roles.*.value, list("app_access"))[0]}"
USER_IMPERSONATION_PERMISSION = "${matchkeys(data.azuread_service_principal.d365bc.oauth2_permissions.*.id, data.azuread_service_principal.d365bc.oauth2_permissions.*.value, list("user_impersonation"))[0]}"
BC_FINANCIALS_READ_WRITE_PERMISSION = "${matchkeys(data.azuread_service_principal.d365bc.oauth2_permissions.*.id, data.azuread_service_principal.d365bc.oauth2_permissions.*.value, list("Financials.ReadWrite.All"))[0]}"
GRAPH_FINANCIAL_READ_WRITE_PERMISSION = "${matchkeys(data.azuread_service_principal.graph-api.oauth2_permissions.*.id, data.azuread_service_principal.graph-api.oauth2_permissions.*.value, list("Financials.ReadWrite.All"))[0]}"
MAIL_READ_PERMISSION = "${matchkeys(data.azuread_service_principal.graph-api.oauth2_permissions.*.id, data.azuread_service_principal.graph-api.oauth2_permissions.*.value, list("User.Read"))[0]}"
MAIL_PERMISSION = "${matchkeys(data.azuread_service_principal.graph-api.oauth2_permissions.*.id, data.azuread_service_principal.graph-api.oauth2_permissions.*.value, list("email"))[0]}"
OFFLINE_PERMISSION = "${matchkeys(data.azuread_service_principal.graph-api.oauth2_permissions.*.id, data.azuread_service_principal.graph-api.oauth2_permissions.*.value, list("offline_access"))[0]}"
OPENID_PERMISSION = "${matchkeys(data.azuread_service_principal.graph-api.oauth2_permissions.*.id, data.azuread_service_principal.graph-api.oauth2_permissions.*.value, list("openid"))[0]}"
PROFILE_PERMISSION = "${matchkeys(data.azuread_service_principal.graph-api.oauth2_permissions.*.id, data.azuread_service_principal.graph-api.oauth2_permissions.*.value, list("profile"))[0]}"
}
resource "azuread_application" "businessCentral" {
name = var.app_name
homepage = var.app_homepage
identifier_uris = []
reply_urls = var.app_reply_urls
available_to_other_tenants = true
type = "webapp/api"
required_resource_access {
resource_app_id = data.azuread_service_principal.graph-api.application_id
resource_access {
id = local.GRAPH_FINANCIAL_READ_WRITE_PERMISSION
type = "Scope"
}
resource_access {
id = local.MAIL_PERMISSION
type = "Scope"
}
resource_access {
id = local.MAIL_READ_PERMISSION
type = "Scope"
}
resource_access {
id = local.OFFLINE_PERMISSION
type = "Scope"
}
resource_access {
id = local.OPENID_PERMISSION
type = "Scope"
}
resource_access {
id = local.PROFILE_PERMISSION
type = "Scope"
}
}
required_resource_access {
resource_app_id = data.azuread_service_principal.d365bc.application_id
resource_access {
id = local.APP_ACCESS_PERMISSION
type = "Role"
}
resource_access {
id = local.USER_IMPERSONATION_PERMISSION
type = "Scope"
}
resource_access {
id = local.BC_FINANCIALS_READ_WRITE_PERMISSION
type = "Scope"
}
}
app_role {
allowed_member_types = [
"Application"
]
description = "Admins can manage roles and perform all task actions"
display_name = "Admin"
is_enabled = true
value = "Admin"
}
}
One thing to note is that the app_access is Role and the rest of the API permissions are Scope.
You can run the above Terraform with:
terraform plan -var="subscription_id={your_scription_id}" -var='app_reply_urls={your_urls_array}' -var="app_name={your_app_name}" -var="app_homepage={your_app_homepage}"
Try this:
provider "azuread" {
# Whilst version is optional, we /strongly recommend/ using it to pin the version of the Provider being used
version = "=0.10.0"
}
data "azuread_service_principal" "d365bc" {
application_id = "996def3d-b36c-4153-8607-a6fd3c01b89f"
}
locals {
APP_ACCESS_PERMISSION = "${matchkeys(data.azuread_service_principal.d365bc.app_roles.*.id, data.azuread_service_principal.d365bc.app_roles.*.value, list("app_access"))[0]}"
USER_IMPERSONATION_PERMISSION = "${matchkeys(data.azuread_service_principal.d365bc.oauth2_permissions.*.id, data.azuread_service_principal.d365bc.oauth2_permissions.*.value, list("user_impersonation"))[0]}"
FINANCIALS_READ_WRITE_PERMISSION = "${matchkeys(data.azuread_service_principal.d365bc.oauth2_permissions.*.id, data.azuread_service_principal.d365bc.oauth2_permissions.*.value, list("Financials.ReadWrite.All"))[0]}"
}
996def3d-b36c-4153-8607-a6fd3c01b89f is the client id of the Microsoft Dynamics 365 Business Central service principal.
app_access is an application permission, so we need to use app_roles rather than oauth2_permissions for it.
To establish an RPC connection in the Community Edition we need to specify the RPC username, password and permissions, but when we integrate an external database like MySQL and change the datasource type from INMEMORY to "DB", it no longer lets us provide the user properties.
These are the settings I am using in my node.conf:
security = {
authService = {
dataSource = {
type = "DB"
passwordEncryption = "SHIRO_1_CRYPT"
connection = {
jdbcUrl = "jdbc:mysql://localhost:3306"
username = "root"
password = "password"
driverClassName = "com.mysql.jdbc.Driver"
}
}
options = {
cache = {
expireAfterSecs = 120
maxEntries = 10000
}
}
}
}
Maybe I didn't understand your question, but database setup in node.conf is separate from RPC user setup in node.conf:
Database (PostGres in my case)
extraConfig = [
'dataSourceProperties.dataSourceClassName' : 'org.postgresql.ds.PGSimpleDataSource',
'dataSourceProperties.dataSource.url' : 'jdbc:postgresql://localhost:5432/postgres',
'dataSourceProperties.dataSource.user' : 'db_user',
'dataSourceProperties.dataSource.password' : 'db_user_password',
'database.transactionIsolationLevel' : 'READ_COMMITTED',
'database.initialiseSchema' : 'true'
]
RPC User
rpcUsers = [[ user: "rpc_user", "password": "rpc_user_password", "permissions": ["ALL"]]]
Ok, I'm adding my node's node.config (it's part of Corda TestNet, and it's deployed on Google Cloud):
baseDirectory = "."
compatibilityZoneURL = "https://netmap.testnet.r3.com"
emailAddress = "xxx"
jarDirs = [ "plugins", "cordapps" ]
sshd { port = 2222 }
myLegalName = "OU=xxx, O=TESTNET_xxx, L=London, C=GB"
keyStorePassword = "xxx"
trustStorePassword = "xxx"
crlCheckSoftFail = true
database = {
transactionIsolationLevel = "READ_COMMITTED"
initialiseSchema = "true"
}
dataSourceProperties {
dataSourceClassName = "org.postgresql.ds.PGSimpleDataSource"
dataSource.url = "jdbc:postgresql://xxx:xxx/postgres"
dataSource.user = xxx
dataSource.password = xxx
}
p2pAddress = "xxx:xxx"
rpcSettings {
useSsl = false
standAloneBroker = false
address = "0.0.0.0:xxx"
adminAddress = "0.0.0.0:xxx"
}
rpcUsers = [
{ username=cordazoneservice, password=xxx, permissions=[ ALL ] }
]
devMode = false
cordappSignerKeyFingerprintBlacklist = []
useTestClock = false
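As a side note on the original question: when authService.dataSource.type is "DB", the node no longer takes RPC users from node.conf at all; Corda's Shiro-based security service reads them from tables in the configured database. Roughly, per the Corda security configuration docs, the expected layout looks like the sketch below (column sizes and the hashed password are placeholders; verify the table names against the docs for your Corda version).
-- users known to the RPC security service
CREATE TABLE users (
  username VARCHAR(64)  NOT NULL PRIMARY KEY,
  password VARCHAR(256) NOT NULL            -- Shiro-hashed when passwordEncryption = "SHIRO_1_CRYPT"
);
-- which roles each user holds
CREATE TABLE user_roles (
  username  VARCHAR(64) NOT NULL,
  role_name VARCHAR(64) NOT NULL
);
-- which permissions each role grants
CREATE TABLE roles_permissions (
  role_name  VARCHAR(64)  NOT NULL,
  permission VARCHAR(256) NOT NULL          -- e.g. ALL, StartFlow.<flow class>, InvokeRpc.<method>
);
-- example: an RPC user with all permissions
INSERT INTO users             VALUES ('rpc_user', '<shiro-1-crypt hash>');
INSERT INTO user_roles        VALUES ('rpc_user', 'default_role');
INSERT INTO roles_permissions VALUES ('default_role', 'ALL');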