forked from convictional/trigger-workflow-and-wait
-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy path entrypoint.sh
More file actions
executable file
·252 lines (212 loc) · 6.62 KB
/
entrypoint.sh
File metadata and controls
executable file
·252 lines (212 loc) · 6.62 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
#!/usr/bin/env bash
# Abort the script as soon as any command fails; downstream failure
# propagation to the calling workflow relies on this.
set -e
# Print an example workflow snippet showing how to invoke this action.
usage_docs() {
  printf '%s\n' \
    '' \
    'You can use this Github Action with:' \
    '- uses: convictional/trigger-workflow-and-wait' \
    ' with:' \
    ' owner: keithconvictional' \
    ' repo: myrepo' \
    ' github_token: ${{ secrets.GITHUB_PERSONAL_ACCESS_TOKEN }}' \
    ' workflow_file_name: main.yaml'
}
# Base URL used when building run links; override via SERVER_URL for
# GitHub Enterprise installations.
GITHUB_SERVER_URL=${SERVER_URL:-https://github.com}
# Global counter of "Not Found" API responses retried so far (see api()).
NOT_FOUND_RETRIES=0
# Validate required inputs and populate the script-level configuration
# globals: wait_interval, propagate_failure, trigger_workflow,
# wait_workflow, client_payload, ref. Exits 1 with usage on bad input.
validate_args() {
  # Polling interval between status checks, in seconds.
  wait_interval=10
  if [ -n "${INPUT_WAIT_INTERVAL}" ]
  then
    wait_interval=${INPUT_WAIT_INTERVAL}
  fi
  # Whether a failed downstream run should fail this job too.
  propagate_failure=true
  if [ -n "${INPUT_PROPAGATE_FAILURE}" ]
  then
    propagate_failure=${INPUT_PROPAGATE_FAILURE}
  fi
  # Whether to dispatch the workflow (vs. only waiting on existing runs).
  trigger_workflow=true
  if [ -n "${INPUT_TRIGGER_WORKFLOW}" ]
  then
    trigger_workflow=${INPUT_TRIGGER_WORKFLOW}
  fi
  # Whether to block until the triggered run(s) complete.
  wait_workflow=true
  if [ -n "${INPUT_WAIT_WORKFLOW}" ]
  then
    wait_workflow=${INPUT_WAIT_WORKFLOW}
  fi
  if [ -z "${INPUT_OWNER}" ]
  then
    echo "Error: Owner is a required argument."
    usage_docs
    exit 1
  fi
  if [ -z "${INPUT_REPO}" ]
  then
    echo "Error: Repo is a required argument."
    usage_docs
    exit 1
  fi
  if [ -z "${INPUT_GITHUB_TOKEN}" ]
  then
    echo "Error: Github token is required. You can head over settings and"
    echo "under developer, you can create a personal access tokens. The"
    echo "token requires repo access."
    usage_docs
    exit 1
  fi
  if [ -z "${INPUT_WORKFLOW_FILE_NAME}" ]
  then
    echo "Error: Workflow File Name is required"
    usage_docs
    exit 1
  fi
  # Workflow dispatch inputs; default to an empty JSON object. A
  # user-supplied payload is compacted AND validated by jq so a malformed
  # value produces an explicit error instead of a silent set -e exit.
  # printf (not echo) preserves backslashes and leading dashes verbatim.
  client_payload='{}'
  if [ -n "${INPUT_CLIENT_PAYLOAD}" ]
  then
    if ! client_payload=$(printf '%s' "${INPUT_CLIENT_PAYLOAD}" | jq -c .)
    then
      echo "Error: client_payload must be valid JSON"
      usage_docs
      exit 1
    fi
  fi
  # Git ref (branch/tag) the downstream workflow runs against.
  ref="main"
  if [ -n "${INPUT_REF}" ]
  then
    ref="${INPUT_REF}"
  fi
}
# Sleep for $1 seconds, or for the configured wait_interval when no
# argument is given. The notice goes to stderr to keep stdout clean for
# callers that capture command output.
lets_wait() {
  local seconds=${1:-$wait_interval}
  printf 'Sleeping for %s seconds\n' "$seconds" >&2
  sleep "$seconds"
}
# Call the GitHub Actions REST API for this repo via `gh api`.
#   $1   - path under repos/OWNER/REPO/actions/
#   $@   - remaining arguments are passed straight through to gh
# Prints the response body on stdout. On failure the error text is
# classified: transient network errors, up to 3 "Not Found" responses
# (tracked in the global NOT_FOUND_RETRIES), and server errors emit "{}"
# so the caller's polling loop can simply retry; any other error
# terminates the whole script with exit 1.
api() {
  local path=$1; shift
  local result
  # 2>&1: capture stderr too so the error text can be classified below.
  if result=$(gh api \
    "repos/${INPUT_OWNER}/${INPUT_REPO}/actions/${path}" \
    -H 'Accept: application/vnd.github.v3+json' \
    "$@" 2>&1)
  then
    echo "$result"
  else
    echo >&2 "api failed:"
    echo >&2 "path: $path"
    echo >&2 "response: $result"
    # gh api does not retry on transient network errors, so we handle them here
    # by returning an empty JSON object and letting the caller's polling loop retry.
    # gh formats DNS errors specially ("error connecting to <host>"), all other
    # network errors use Go's url.Error format ('<Method> "<URL>": <inner error>').
    if echo "$result" | grep -qiE "error connecting to|connection refused|connection timed out|operation timed out|context deadline exceeded|i/o timeout|TLS handshake timeout|connection reset by peer|broken pipe"; then
      echo "{}"
      echo >&2 "Transient network error - trying again"
    # "Not Found" can be returned briefly right after a dispatch; tolerate a few.
    elif [ $NOT_FOUND_RETRIES -lt 3 ] && echo "$result" | grep -q "Not Found"; then
      echo "{}"
      NOT_FOUND_RETRIES=$((NOT_FOUND_RETRIES + 1))
      echo >&2 "Not found (attempt $NOT_FOUND_RETRIES) - trying again"
    elif echo "$result" | grep -q "Server Error"; then
      echo "{}"
      echo >&2 "Server error - trying again"
    else
      # Unrecognized failure (e.g. bad credentials): abort the whole script.
      exit 1
    fi
  fi
}
# Return the ids of the most recent workflow runs, optionally filtered by user
#   $1 - ISO-8601 timestamp; only runs created at/after it are listed.
# Prints one run id per line, lexicographically sorted.
get_workflow_runs() {
  # local: these previously leaked into the global namespace.
  local since=${1:?}
  # ${INPUT_GITHUB_USER+&actor=}: the "&actor=" key is only added when the
  # variable is set, so an unset filter leaves the query untouched.
  local query="event=workflow_dispatch&created=>=$since${INPUT_GITHUB_USER+&actor=}${INPUT_GITHUB_USER}&per_page=100"
  echo "Getting workflow runs using query: ${query}" >&2
  api "workflows/${INPUT_WORKFLOW_FILE_NAME}/runs?${query}" |
    jq -r '.workflow_runs[].id' |
    sort # Sort to ensure repeatable order, and lexicographically for compatibility with join
}
# Dispatch the configured workflow and print the id(s) of the run(s) it
# started, one per line. Uses the globals set by validate_args (ref,
# client_payload, INPUT_*).
trigger_workflow() {
  START_TIME=$(date +%s)
  # Look two minutes into the past so clock skew between this runner and
  # GitHub cannot hide runs from the "created>=" filter.
  SINCE=$(date -u -Iseconds -d "@$((START_TIME - 120))") # Two minutes ago, to overcome clock skew
  # Snapshot of run ids that existed BEFORE the dispatch; new ids are
  # detected later by set-difference against this list.
  OLD_RUNS=$(get_workflow_runs "$SINCE")
  echo >&2 "Triggering workflow:"
  echo >&2 " workflows/${INPUT_WORKFLOW_FILE_NAME}/dispatches"
  echo >&2 " {\"ref\":\"${ref}\",\"inputs\":${client_payload}}"
  # POST the dispatch; the request body is fed via stdin (--input -).
  dispatch_response=$(api "workflows/${INPUT_WORKFLOW_FILE_NAME}/dispatches" \
    --method POST \
    --input - <<EOF
{"ref":"${ref}","inputs":${client_payload}}
EOF
  )
  # Check if the response contains workflow_run_id (new GitHub API behavior)
  workflow_run_id=$(echo "$dispatch_response" | jq -r '.workflow_run_id // empty')
  if [ -n "$workflow_run_id" ]; then
    echo >&2 "Workflow run ID returned directly: $workflow_run_id"
    echo "$workflow_run_id"
    return
  fi
  # Fall back to polling approach (old behavior)
  echo >&2 "No workflow_run_id in response, falling back to polling"
  NEW_RUNS=$OLD_RUNS
  # Poll until the run list changes, i.e. our dispatched run appears.
  while [ "$NEW_RUNS" = "$OLD_RUNS" ]
  do
    lets_wait
    NEW_RUNS=$(get_workflow_runs "$SINCE")
  done
  # Return new run ids
  # join -v2 prints lines only present in NEW_RUNS (both lists are sorted).
  join -v2 <(echo "$OLD_RUNS") <(echo "$NEW_RUNS")
}
# Post a comment linking to the downstream run ($1) on the configured
# issue/PR comments URL. A failure to comment is logged but never fatal.
comment_downstream_link() {
  local run_url=$1
  if ! gh api "${INPUT_COMMENT_DOWNSTREAM_URL}" \
      --method POST \
      -H "Authorization: Bearer ${INPUT_COMMENT_GITHUB_TOKEN}" \
      -f body="Running downstream job at ${run_url}"
  then
    echo >&2 "failed to comment to ${INPUT_COMMENT_DOWNSTREAM_URL}:"
  fi
}
# Poll the given workflow run ($1) until it completes; write workflow_id,
# workflow_url and the final conclusion to $GITHUB_OUTPUT. If the run did
# not succeed and propagate_failure is true, exit 1 to fail this job too.
wait_for_workflow_to_finish() {
  # local: these previously leaked into the global namespace.
  local last_workflow_id=${1:?}
  local last_workflow_url="${GITHUB_SERVER_URL}/${INPUT_OWNER}/${INPUT_REPO}/actions/runs/${last_workflow_id}"
  echo "Waiting for workflow to finish:"
  echo "The workflow id is [${last_workflow_id}]."
  echo "The workflow logs can be found at ${last_workflow_url}"
  # Quote the redirect target: the runner-supplied path could contain
  # spaces, which makes an unquoted redirection an error in bash.
  echo "workflow_id=${last_workflow_id}" >> "$GITHUB_OUTPUT"
  echo "workflow_url=${last_workflow_url}" >> "$GITHUB_OUTPUT"
  echo ""
  if [ -n "${INPUT_COMMENT_DOWNSTREAM_URL}" ]; then
    # Quote the URL argument so it is passed as a single word.
    comment_downstream_link "${last_workflow_url}"
  fi
  local conclusion=null
  local status=
  local workflow
  # A finished run has status "completed" and a non-null conclusion.
  while [[ "${conclusion}" == "null" && "${status}" != "completed" ]]
  do
    lets_wait
    workflow=$(api "runs/$last_workflow_id")
    conclusion=$(echo "${workflow}" | jq -r '.conclusion')
    status=$(echo "${workflow}" | jq -r '.status')
    echo "Checking conclusion [${conclusion}]"
    echo "Checking status [${status}]"
    echo "conclusion=${conclusion}" >> "$GITHUB_OUTPUT"
  done
  if [[ "${conclusion}" == "success" && "${status}" == "completed" ]]
  then
    echo "Yes, success"
  else
    # Alternative "failure"
    echo "Conclusion is not success, it's [${conclusion}]."
    if [ "${propagate_failure}" = true ]
    then
      echo "Propagating failure to upstream job"
      exit 1
    fi
  fi
}
# Entry point: validate inputs, optionally trigger the workflow, then
# optionally wait for every started run to finish.
main() {
  validate_args
  case "${trigger_workflow}" in
    true)
      run_ids=$(trigger_workflow)
      ;;
    *)
      echo "Skipping triggering the workflow."
      ;;
  esac
  case "${wait_workflow}" in
    true)
      local run_id
      # Intentionally unquoted: run_ids is a whitespace-separated id list.
      for run_id in $run_ids; do
        wait_for_workflow_to_finish "$run_id"
      done
      ;;
    *)
      echo "Skipping waiting for workflow."
      ;;
  esac
}
main