forked from DefinitelyTyped/DefinitelyTyped
-
Notifications
You must be signed in to change notification settings - Fork 0
/
bull.d.ts
309 lines (251 loc) · 11.2 KB
/
bull.d.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
// Type definitions for bull 0.7.0
// Project: https://github.com/OptimalBits/bull
// Definitions by: Bruno Grieder <https://github.com/bgrieder>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
/// <reference path="../redis/redis.d.ts" />
declare module "bull" {

    import * as Redis from "redis";

    /**
     * This is the Queue constructor.
     * It creates a new Queue that is persisted in Redis.
     * Every time the same queue is instantiated it tries to process all the old jobs
     * that may exist from a previous unfinished session.
     */
    function Bull(queueName: string, redisPort: number, redisHost: string, redisOpt?: Redis.ClientOpts): Bull.Queue;

    namespace Bull {

        export interface DoneCallback {
            /**
             * Signals job completion: call with an Error instance to mark the job as failed,
             * or with a result as second argument (e.g.: done(null, result);) to mark it as completed.
             */
            (error?: Error, value?: any): void;
        }

        export interface Job {

            id: string;

            /**
             * The custom data passed when the job was created
             */
            data: Object;

            /**
             * Report progress on a job
             */
            progress(value: any): Promise<void>;

            /**
             * Removes a Job from the queue from all the lists where it may be included.
             * @returns {Promise} A promise that resolves when the job is removed.
             */
            remove(): Promise<void>;

            /**
             * Rerun a Job that has failed.
             * @returns {Promise} A promise that resolves when the job is scheduled for retry.
             */
            retry(): Promise<void>;
        }

        export interface Backoff {
            /**
             * Backoff type, which can be either `fixed` or `exponential`
             */
            type: string;

            /**
             * Backoff delay, in milliseconds
             */
            delay: number;
        }

        export interface AddOptions {
            /**
             * An amount of milliseconds to wait until this job can be processed.
             * Note that for accurate delays, both server and clients should have their clocks synchronized
             */
            delay?: number;

            /**
             * A number of attempts to retry if the job fails [optional]
             */
            attempts?: number;

            /**
             * Backoff setting for automatic retries if the job fails
             */
            backoff?: number | Backoff;

            /**
             * A boolean which, if true, adds the job to the right
             * of the queue instead of the left (default false)
             */
            lifo?: boolean;

            /**
             * The number of milliseconds after which the job should fail with a timeout error
             */
            timeout?: number;
        }

        export interface Queue {

            /**
             * Defines a processing function for the jobs placed into a given Queue.
             *
             * The callback is called every time a job is placed in the queue.
             * It is passed an instance of the job as first argument.
             *
             * The done callback can be called with an Error instance, to signal that the job did not complete successfully,
             * or with a result as second argument (e.g.: done(null, result);) when the job is successful.
             * Errors will be passed as a second argument to the "failed" event;
             * results, as a second argument to the "completed" event.
             *
             * concurrency: Bull will then call your handler in parallel respecting this max number.
             */
            process(concurrency: number, callback: (job: Job, done: DoneCallback) => void): void;

            /**
             * Defines a processing function for the jobs placed into a given Queue.
             *
             * The callback is called every time a job is placed in the queue.
             * It is passed an instance of the job as first argument.
             *
             * The done callback can be called with an Error instance, to signal that the job did not complete successfully,
             * or with a result as second argument (e.g.: done(null, result);) when the job is successful.
             * Errors will be passed as a second argument to the "failed" event;
             * results, as a second argument to the "completed" event.
             */
            process(callback: (job: Job, done: DoneCallback) => void): void;

            /**
             * Defines a processing function for the jobs placed into a given Queue.
             *
             * The callback is called every time a job is placed in the queue.
             * It is passed an instance of the job as first argument.
             *
             * A promise must be returned to signal job completion.
             * If the promise is rejected, the error will be passed as a second argument to the "failed" event.
             * If it is resolved, its value will be the "completed" event's second argument.
             *
             * concurrency: Bull will then call your handler in parallel respecting this max number.
             */
            process(concurrency: number, callback: (job: Job) => Promise<any>): Promise<any>;

            /**
             * Defines a processing function for the jobs placed into a given Queue.
             *
             * The callback is called every time a job is placed in the queue.
             * It is passed an instance of the job as first argument.
             *
             * A promise must be returned to signal job completion.
             * If the promise is rejected, the error will be passed as a second argument to the "failed" event.
             * If it is resolved, its value will be the "completed" event's second argument.
             */
            process(callback: (job: Job) => Promise<any>): Promise<any>;

            /**
             * Creates a new job and adds it to the queue.
             * If the queue is empty the job will be executed directly,
             * otherwise it will be placed in the queue and executed as soon as possible.
             */
            add(data: Object, opts?: AddOptions): Promise<Job>;

            /**
             * Returns a promise that resolves when the queue is paused.
             * The pause is global, meaning that all workers in all queue instances for a given queue will be paused.
             * A paused queue will not process new jobs until resumed,
             * but current jobs being processed will continue until they are finalized.
             *
             * Pausing a queue that is already paused does nothing.
             */
            pause(): Promise<void>;

            /**
             * Returns a promise that resolves when the queue is resumed after being paused.
             * The resume is global, meaning that all workers in all queue instances for a given queue will be resumed.
             *
             * Resuming a queue that is not paused does nothing.
             */
            resume(): Promise<void>;

            /**
             * Returns a promise that returns the number of jobs in the queue, waiting or paused.
             * Since there may be other processes adding or processing jobs, this value may be true only for a very small amount of time.
             */
            count(): Promise<number>;

            /**
             * Empties a queue deleting all the input lists and associated jobs.
             */
            empty(): Promise<void>;

            /**
             * Closes the underlying redis client. Use this to perform a graceful shutdown.
             *
             * `close` can be called from anywhere, with one caveat:
             * if called from within a job handler the queue won't close until after the job has been processed
             */
            close(): Promise<void>;

            /**
             * Returns a promise that will return the job instance associated with the jobId parameter.
             * If the specified job cannot be located, the promise callback parameter will be set to null.
             */
            getJob(jobId: string): Promise<Job>;

            /**
             * Tells the queue remove all jobs created outside of a grace period in milliseconds.
             * You can clean the jobs with the following states: completed, waiting, active, delayed, and failed.
             */
            clean(gracePeriod: number, jobsState?: string): Promise<Job[]>;

            /**
             * Listens to queue events
             * 'ready', 'error', 'active', 'progress', 'completed', 'failed', 'paused', 'resumed', 'cleaned'
             */
            on(eventName: string, callback: EventCallback): void;
        }

        interface EventCallback {
            (...args: any[]): void;
        }

        interface ReadyEventCallback extends EventCallback {
            (): void;
        }

        interface ErrorEventCallback extends EventCallback {
            (error: Error): void;
        }

        interface JobPromise {
            /**
             * Abort this job
             */
            cancel(): void;
        }

        interface ActiveEventCallback extends EventCallback {
            (job: Job, jobPromise: JobPromise): void;
        }

        interface ProgressEventCallback extends EventCallback {
            (job: Job, progress: any): void;
        }

        interface CompletedEventCallback extends EventCallback {
            (job: Job, result: Object): void;
        }

        interface FailedEventCallback extends EventCallback {
            (job: Job, error: Error): void;
        }

        interface PausedEventCallback extends EventCallback {
            (): void;
        }

        interface ResumedEventCallback extends EventCallback {
            (job?: Job): void;
        }

        /**
         * @see clean() for details
         */
        interface CleanedEventCallback extends EventCallback {
            (jobs: Job[], type: string): void;
        }
    }

    export = Bull;
}
declare module "bull/lib/priority-queue" {
import * as Bull from "bull";
import * as Redis from "redis";
/**
 * This is the Queue constructor of the priority queue.
 *
 * It works the same as a normal queue, with the same functions and parameters.
 * The only difference is that Queue#add() allows an option opts.priority
 * that can take one of ["low", "normal", "medium", "high", "critical"]. If no priority is provided, "normal" is used.
 *
 * The priority queue will process higher priority jobs more often than lower priority ones.
 */
function PQueue(queueName: string, redisPort: number, redisHost: string, redisOpt?: Redis.ClientOpts): PQueue.PriorityQueue;
namespace PQueue {
export interface AddOptions extends Bull.AddOptions {
/**
 * One of "low", "normal", "medium", "high", "critical" (defaults to "normal")
 */
priority?: string;
}
export interface PriorityQueue extends Bull.Queue {
/**
 * Creates a new job and adds it to the queue.
 * If the queue is empty the job will be executed directly,
 * otherwise it will be placed in the queue and executed as soon as possible.
 */
add(data: Object, opts?: PQueue.AddOptions): Promise<Bull.Job>;
}
}
export = PQueue;
}