// Copyright 2018 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package ui

import (
	"context"

	activities_model "code.gitea.io/gitea/models/activities"
	"code.gitea.io/gitea/models/db"
	issues_model "code.gitea.io/gitea/models/issues"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/container"
	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/notification/base"
	"code.gitea.io/gitea/modules/queue"
)

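// notificationService delivers Gitea web UI notifications. It implements
// base.Notifier by pushing issueNotificationOpts onto a worker pool queue,
// whose handler persists them as issue notifications.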
type (
	notificationService struct {
		base.NullNotifier
		issueQueue *queue.WorkerPoolQueue[issueNotificationOpts]
	}

	issueNotificationOpts struct {
		IssueID              int64
		CommentID            int64
		NotificationAuthorID int64
		ReceiverID           int64 // 0 -- ALL Watcher
	}
)

var _ base.Notifier = &notificationService{}

// NewNotifier creates a new notificationService notifier
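// A minimal registration sketch (an assumption about the surrounding wiring,
// not part of this file): the returned notifier is registered once at startup,
// e.g. notification.RegisterNotifier(ui.NewNotifier()).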
func NewNotifier() base.Notifier {
	ns := &notificationService{}
	ns.issueQueue = queue.CreateSimpleQueue("notification-service", handler)
	return ns
}

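// handler is the queue handler: it creates or updates the issue notifications
// described by each queued issueNotificationOpts. Failures are only logged,
// so nothing is returned for re-queueing.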
func handler(items ...issueNotificationOpts) []issueNotificationOpts {
	for _, opts := range items {
		if err := activities_model.CreateOrUpdateIssueNotifications(opts.IssueID, opts.CommentID, opts.NotificationAuthorID, opts.ReceiverID); err != nil {
			log.Error("Was unable to create issue notification: %v", err)
		}
	}
	return nil
}

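// Run starts the notification queue under the graceful manager so that it is
// shut down together with the rest of the process.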
func (ns *notificationService) Run() {
	go graceful.GetManager().RunWithShutdownFns(ns.issueQueue.Run)
}

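// NotifyCreateIssueComment queues a notification for all watchers
// (ReceiverID 0) plus one targeted notification per mentioned user.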
func (ns *notificationService) NotifyCreateIssueComment(ctx context.Context, doer *user_model.User, repo *repo_model.Repository,
	issue *issues_model.Issue, comment *issues_model.Comment, mentions []*user_model.User,
) {
	opts := issueNotificationOpts{
		IssueID:              issue.ID,
		NotificationAuthorID: doer.ID,
	}
	if comment != nil {
		opts.CommentID = comment.ID
	}
	_ = ns.issueQueue.Push(opts)
	for _, mention := range mentions {
		opts := issueNotificationOpts{
			IssueID:              issue.ID,
			NotificationAuthorID: doer.ID,
			ReceiverID:           mention.ID,
		}
		if comment != nil {
			opts.CommentID = comment.ID
		}
		_ = ns.issueQueue.Push(opts)
	}
}

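// NotifyNewIssue queues a notification for all watchers and one targeted
// notification per mentioned user, authored by the issue poster.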
func (ns *notificationService) NotifyNewIssue(ctx context.Context, issue *issues_model.Issue, mentions []*user_model.User) {
	_ = ns.issueQueue.Push(issueNotificationOpts{
		IssueID:              issue.ID,
		NotificationAuthorID: issue.Poster.ID,
	})
	for _, mention := range mentions {
		_ = ns.issueQueue.Push(issueNotificationOpts{
			IssueID:              issue.ID,
			NotificationAuthorID: issue.Poster.ID,
			ReceiverID:           mention.ID,
		})
	}
}

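// NotifyIssueChangeStatus queues a notification for the close/reopen action,
// referencing the comment that records the status change.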
func (ns *notificationService) NotifyIssueChangeStatus(ctx context.Context, doer *user_model.User, commitID string, issue *issues_model.Issue, actionComment *issues_model.Comment, isClosed bool) {
	_ = ns.issueQueue.Push(issueNotificationOpts{
		IssueID:              issue.ID,
		NotificationAuthorID: doer.ID,
		CommentID:            actionComment.ID,
	})
}

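// NotifyIssueChangeTitle only notifies when a pull request title loses its
// work-in-progress prefix, i.e. the pull request becomes ready for review.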
func (ns *notificationService) NotifyIssueChangeTitle(ctx context.Context, doer *user_model.User, issue *issues_model.Issue, oldTitle string) {
	if err := issue.LoadPullRequest(ctx); err != nil {
		log.Error("issue.LoadPullRequest: %v", err)
		return
	}
	if issue.IsPull && issues_model.HasWorkInProgressPrefix(oldTitle) && !issue.PullRequest.IsWorkInProgress() {
		_ = ns.issueQueue.Push(issueNotificationOpts{
			IssueID:              issue.ID,
			NotificationAuthorID: doer.ID,
		})
	}
}

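// NotifyMergePullRequest queues a notification on the pull request's issue
// when it is merged.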
func (ns *notificationService) NotifyMergePullRequest(ctx context.Context, doer *user_model.User, pr *issues_model.PullRequest) {
	_ = ns.issueQueue.Push(issueNotificationOpts{
		IssueID:              pr.Issue.ID,
		NotificationAuthorID: doer.ID,
	})
}

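// NotifyAutoMergePullRequest treats an automatic merge like a manual one.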
func (ns *notificationService) NotifyAutoMergePullRequest(ctx context.Context, doer *user_model.User, pr *issues_model.PullRequest) {
	ns.NotifyMergePullRequest(ctx, doer, pr)
}

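// NotifyNewPullRequest gathers repository watchers, issue participants and
// mentioned users into a set, drops the poster, and queues one notification
// per remaining receiver.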
func (ns *notificationService) NotifyNewPullRequest(ctx context.Context, pr *issues_model.PullRequest, mentions []*user_model.User) {
	if err := pr.LoadIssue(ctx); err != nil {
		log.Error("Unable to load issue: %d for pr: %d: Error: %v", pr.IssueID, pr.ID, err)
		return
	}
	toNotify := make(container.Set[int64], 32)
	repoWatchers, err := repo_model.GetRepoWatchersIDs(ctx, pr.Issue.RepoID)
	if err != nil {
		log.Error("GetRepoWatchersIDs: %v", err)
		return
	}
	for _, id := range repoWatchers {
		toNotify.Add(id)
	}
	issueParticipants, err := issues_model.GetParticipantsIDsByIssueID(ctx, pr.IssueID)
	if err != nil {
		log.Error("GetParticipantsIDsByIssueID: %v", err)
		return
	}
	for _, id := range issueParticipants {
		toNotify.Add(id)
	}
	delete(toNotify, pr.Issue.PosterID)
	for _, mention := range mentions {
		toNotify.Add(mention.ID)
	}
	for receiverID := range toNotify {
		_ = ns.issueQueue.Push(issueNotificationOpts{
			IssueID:              pr.Issue.ID,
			NotificationAuthorID: pr.Issue.PosterID,
			ReceiverID:           receiverID,
		})
	}
}

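// NotifyPullRequestReview queues a notification for the review (and its
// comment, if any) plus a targeted notification per mentioned user.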
func (ns *notificationService) NotifyPullRequestReview(ctx context.Context, pr *issues_model.PullRequest, r *issues_model.Review, c *issues_model.Comment, mentions []*user_model.User) {
	opts := issueNotificationOpts{
		IssueID:              pr.Issue.ID,
		NotificationAuthorID: r.Reviewer.ID,
	}
	if c != nil {
		opts.CommentID = c.ID
	}
	_ = ns.issueQueue.Push(opts)
	for _, mention := range mentions {
		opts := issueNotificationOpts{
			IssueID:              pr.Issue.ID,
			NotificationAuthorID: r.Reviewer.ID,
			ReceiverID:           mention.ID,
		}
		if c != nil {
			opts.CommentID = c.ID
		}
		_ = ns.issueQueue.Push(opts)
	}
}

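// NotifyPullRequestCodeComment only notifies the users mentioned in the code
// comment.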
func (ns *notificationService) NotifyPullRequestCodeComment(ctx context.Context, pr *issues_model.PullRequest, c *issues_model.Comment, mentions []*user_model.User) {
	for _, mention := range mentions {
		_ = ns.issueQueue.Push(issueNotificationOpts{
			IssueID:              pr.Issue.ID,
			NotificationAuthorID: c.Poster.ID,
			CommentID:            c.ID,
			ReceiverID:           mention.ID,
		})
	}
}

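// NotifyPullRequestPushCommits queues a notification for the comment that
// records newly pushed commits.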
func (ns *notificationService) NotifyPullRequestPushCommits(ctx context.Context, doer *user_model.User, pr *issues_model.PullRequest, comment *issues_model.Comment) {
	opts := issueNotificationOpts{
		IssueID:              pr.IssueID,
		NotificationAuthorID: doer.ID,
		CommentID:            comment.ID,
	}
	_ = ns.issueQueue.Push(opts)
}

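// NotifyPullReviewDismiss queues a notification for the comment that records
// the dismissed review.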
func (ns *notificationService) NotifyPullReviewDismiss(ctx context.Context, doer *user_model.User, review *issues_model.Review, comment *issues_model.Comment) {
	opts := issueNotificationOpts{
		IssueID:              review.IssueID,
		NotificationAuthorID: doer.ID,
		CommentID:            comment.ID,
	}
	_ = ns.issueQueue.Push(opts)
}

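// NotifyIssueChangeAssignee notifies a newly assigned user, unless the user
// assigned themselves or the assignment was removed.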
func (ns *notificationService) NotifyIssueChangeAssignee(ctx context.Context, doer *user_model.User, issue *issues_model.Issue, assignee *user_model.User, removed bool, comment *issues_model.Comment) {
	if !removed && doer.ID != assignee.ID {
		opts := issueNotificationOpts{
			IssueID:              issue.ID,
			NotificationAuthorID: doer.ID,
			ReceiverID:           assignee.ID,
		}

		if comment != nil {
			opts.CommentID = comment.ID
		}

		_ = ns.issueQueue.Push(opts)
	}
}

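// NotifyPullReviewRequest notifies the requested reviewer; removing a review
// request does not notify.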
func (ns *notificationService) NotifyPullReviewRequest(ctx context.Context, doer *user_model.User, issue *issues_model.Issue, reviewer *user_model.User, isRequest bool, comment *issues_model.Comment) {
	if isRequest {
		opts := issueNotificationOpts{
			IssueID:              issue.ID,
			NotificationAuthorID: doer.ID,
			ReceiverID:           reviewer.ID,
		}

		if comment != nil {
			opts.CommentID = comment.ID
		}

		_ = ns.issueQueue.Push(opts)
	}
}

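// NotifyRepoPendingTransfer creates the repository transfer notification for
// the prospective new owner inside a database transaction.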
func (ns *notificationService) NotifyRepoPendingTransfer(ctx context.Context, doer, newOwner *user_model.User, repo *repo_model.Repository) {
	err := db.WithTx(ctx, func(ctx context.Context) error {
		return activities_model.CreateRepoTransferNotification(ctx, doer, newOwner, repo)
	})
	if err != nil {
		log.Error("CreateRepoTransferNotification: %v", err)
	}
}