Ajith Kumara Beragala Acharige Lal
12/28/2020, 11:58 AMgraphql
, not sure what's causing this, any idea?Ajith Kumara Beragala Acharige Lal
12/28/2020, 12:00 PMAjith Kumara Beragala Acharige Lal
12/28/2020, 12:00 PM# debug mode
debug = false
# base configuration directory (typically you won't change this!)
home_dir = "~/.prefect"
backend = "cloud"
[server]
host = "http://10.110.0.7"
port = "4200"
host_port = "4200"
endpoint = "${server.host}:${server.port}"
[server.database]
host = "10.110.0.7"
port = "5433"
host_port = "5433"
name = "prefect_server"
username = "prefect"
# set to "" to generate a random password each time the database starts
password = "test-password"
connection_url = "postgresql://${server.database.username}:${server.database.password}@${server.database.host}:${server.database.port}/${server.database.name}"
volume_path = "${home_dir}/pg_data"
[server.graphql]
host = "0.0.0.0"
port = "4201"
host_port = "4201"
debug = false
path = "/graphql/"
[server.hasura]
host = "10.110.0.7"
port = "3000"
host_port = "3000"
admin_secret = "" # a string. One will be automatically generated if not provided.
claims_namespace = "hasura-claims"
graphql_url = "http://${server.hasura.host}:${server.hasura.port}/v1alpha1/graphql"
ws_url = "ws://${server.hasura.host}:${server.hasura.port}/v1alpha1/graphql"
execute_retry_seconds = 10
[server.ui]
host = "http://10.110.0.7"
port = "8089"
host_port = "8089"
endpoint = "${server.ui.host}:${server.ui.port}"
apollo_url = "http://0.0.0.0:4200/graphql"
[server.telemetry]
enabled = true
[cloud]
api = "${${backend}.endpoint}"
endpoint = "https://api.prefect.io"
graphql = "${cloud.api}/graphql"
use_local_secrets = true
heartbeat_interval = 30.0
check_cancellation_interval = 15.0
diagnostics = false
# rate at which to batch upload logs
logging_heartbeat = 5
queue_interval = 30.0
[cloud.agent]
name = "agent"
labels = "[]"
# Set to `DEBUG` for verbose logging
level = "INFO"
# Agents require different API tokens
auth_token = ""
# Internal address for agent health checks, etc...
agent_address = ""
[cloud.agent.resource_manager]
# Separate loop interval for resource managers
loop_interval = 60
[logging]
# The logging level: NOTSET, DEBUG, INFO, WARNING, ERROR, or CRITICAL
level = "INFO"
# The log format
format = "[%(asctime)s] %(levelname)s - %(name)s | %(message)s"
# additional log attributes to extract from context
# e.g., log_attributes = "['context_var']"
log_attributes = "[]"
# the timestamp format
datefmt = "%Y-%m-%d %H:%M:%S%z"
# Send logs to Prefect Cloud
log_to_cloud = false
# Extra loggers for Prefect log configuration
extra_loggers = "[]"
[flows]
# If true, edges are checked for cycles as soon as they are added to the flow. If false,
# cycles are only checked when tasks are sorted (for example, when running or
# serializing the flow). Defaults to false because it can affect the performance of
# large flows.
eager_edge_validation = false
# If true, `flow.run` will run on schedule by default.
# If false, only a single execution will occur (no retries, etc.)
run_on_schedule = true
# If true, tasks which set `checkpoint=True` will have their result handlers called
checkpointing = false
[flows.defaults]
[flows.defaults.storage]
# Whether to include a storage's default labels. Useful for
# controlling Agent's workflows.
add_default_labels = true
# the default storage class, specified using a full path
default_class = "prefect.storage.Local"
[tasks]
[tasks.defaults]
# the number of times tasks retry before they fail.
# false indicates that tasks should never retry (equivalent to max_retries = 0)
max_retries = false
# the amount of time tasks should wait before retrying, in seconds.
# false indicates that tasks have no default value (users must specify one to set it)
retry_delay = false
[engine]
[engine.executor]
# the default executor, specified using a full path
default_class = "prefect.executors.LocalExecutor"
[engine.executor.dask]
# the default scheduler address for the DaskExecutor.
address = ""
# the default Cluster class to use to create a temporary dask cluster
cluster_class = "distributed.deploy.local.LocalCluster"
[engine.flow_runner]
# the default flow runner, specified using a full path
default_class = "prefect.engine.flow_runner.FlowRunner"
[engine.task_runner]
# the default task runner, specified using a full path
default_class = "prefect.engine.task_runner.TaskRunner"
Ajith Kumara Beragala Acharige Lal
12/28/2020, 12:01 PMdocker ps | grep prefect
outputAjith Kumara Beragala Acharige Lal
12/28/2020, 12:02 PM#netstat -ltnp
outputAjith Kumara Beragala Acharige Lal
12/28/2020, 12:03 PMAmanda Wee
12/28/2020, 12:06 PMserver
since you're using prefect server rather than prefect cloudAjith Kumara Beragala Acharige Lal
12/28/2020, 12:42 PMbackend
="server"
I've tried the following: http://localhost:4200/graphql,
http://0.0.0.0:4200/graphql, http://127.0.0.1:4200/graphql,
and http://10.110.0.7:4200/graphql
as the Prefect Server GraphQL endpoint
, do you see any other issues in my config file? Any suggestions to make this work? Thank you very much!Amanda Wee
12/28/2020, 12:49 PMAmanda Wee
12/28/2020, 12:51 PMAjith Kumara Beragala Acharige Lal
12/28/2020, 12:56 PMAjith Kumara Beragala Acharige Lal
12/28/2020, 12:57 PMAjith Kumara Beragala Acharige Lal
12/28/2020, 12:58 PMprefect backend server
and then prefect server start
Ajith Kumara Beragala Acharige Lal
12/28/2020, 12:59 PMAmanda Wee
12/28/2020, 1:02 PMAjith Kumara Beragala Acharige Lal
12/28/2020, 1:05 PMconfig.toml
and let you know, thanks a lot @Amanda WeeAmanda Wee
12/28/2020, 1:12 PMAjith Kumara Beragala Acharige Lal
12/28/2020, 1:42 PMThis is the latest config.toml I tried, but still no luck...
Ajith Kumara Beragala Acharige Lal
12/28/2020, 1:42 PM# debug mode
debug = true
# base configuration directory (typically you won't change this!)
home_dir = "~/.prefect"
backend = "server"
[server]
host = "http://localhost"
port = "4200"
host_port = "4200"
endpoint = "${server.host}:${server.port}"
[server.database]
host = "10.110.0.7"
port = "5433"
host_port = "5433"
name = "prefect_server"
username = "prefect"
# set to "" to generate a random password each time the database starts
password = "test-password"
connection_url = "postgresql://${server.database.username}:${server.database.password}@${server.database.host}:${server.database.port}/${server.database.name}"
volume_path = "${home_dir}/pg_data"
[server.graphql]
host = "0.0.0.0"
port = "4201"
host_port = "4201"
debug = true
path = "/graphql/"
[server.hasura]
host = "localhost"
port = "3000"
host_port = "3000"
admin_secret = "" # a string. One will be automatically generated if not provided.
claims_namespace = "hasura-claims"
graphql_url = "http://${server.hasura.host}:${server.hasura.port}/v1alpha1/graphql"
ws_url = "ws://${server.hasura.host}:${server.hasura.port}/v1alpha1/graphql"
execute_retry_seconds = 10
[server.ui]
host = "http://localhost"
port = "8089"
host_port = "8089"
endpoint = "${server.ui.host}:${server.ui.port}"
apollo_url = "http://localhost:4200/graphql"
[server.telemetry]
enabled = true
[cloud]
api = "${${backend}.endpoint}"
endpoint = "https://api.prefect.io"
graphql = "${cloud.api}/graphql"
use_local_secrets = true
heartbeat_interval = 30.0
check_cancellation_interval = 15.0
diagnostics = false
# rate at which to batch upload logs
logging_heartbeat = 5
queue_interval = 30.0
[cloud.agent]
name = "agent"
labels = "[]"
# Set to `DEBUG` for verbose logging
level = "INFO"
# Agents require different API tokens
auth_token = ""
# Internal address for agent health checks, etc...
agent_address = ""
[cloud.agent.resource_manager]
# Separate loop interval for resource managers
loop_interval = 60
[logging]
# The logging level: NOTSET, DEBUG, INFO, WARNING, ERROR, or CRITICAL
level = "INFO"
# The log format
format = "[%(asctime)s] %(levelname)s - %(name)s | %(message)s"
# additional log attributes to extract from context
# e.g., log_attributes = "['context_var']"
log_attributes = "[]"
# the timestamp format
datefmt = "%Y-%m-%d %H:%M:%S%z"
# Send logs to Prefect Cloud
log_to_cloud = false
# Extra loggers for Prefect log configuration
extra_loggers = "[]"
[flows]
# If true, edges are checked for cycles as soon as they are added to the flow. If false,
# cycles are only checked when tasks are sorted (for example, when running or
# serializing the flow). Defaults to false because it can affect the performance of
# large flows.
eager_edge_validation = false
# If true, `flow.run` will run on schedule by default.
# If false, only a single execution will occur (no retries, etc.)
run_on_schedule = true
# If true, tasks which set `checkpoint=True` will have their result handlers called
checkpointing = false
[flows.defaults]
[flows.defaults.storage]
# Whether to include a storage's default labels. Useful for
# controlling Agent's workflows.
add_default_labels = true
# the default storage class, specified using a full path
default_class = "prefect.storage.Local"
[tasks]
[tasks.defaults]
# the number of times tasks retry before they fail.
# false indicates that tasks should never retry (equivalent to max_retries = 0)
max_retries = false
# the amount of time tasks should wait before retrying, in seconds.
# false indicates that tasks have no default value (users must specify one to set it)
retry_delay = false
[engine]
[engine.executor]
# the default executor, specified using a full path
default_class = "prefect.executors.LocalExecutor"
[engine.executor.dask]
# the default scheduler address for the DaskExecutor.
address = ""
# the default Cluster class to use to create a temporary dask cluster
cluster_class = "distributed.deploy.local.LocalCluster"
[engine.flow_runner]
# the default flow runner, specified using a full path
default_class = "prefect.engine.flow_runner.FlowRunner"
[engine.task_runner]
# the default task runner, specified using a full path
default_class = "prefect.engine.task_runner.TaskRunner"
Ajith Kumara Beragala Acharige Lal
12/28/2020, 1:43 PMAjith Kumara Beragala Acharige Lal
12/28/2020, 2:10 PM