chore: fix typos throughout the codebase (#10753)
This PR fixes a number of typos throughout the entire repository, found by running https://github.com/crate-ci/typos and then changing all occurrences that I naively deemed "safe enough".

Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/10753
Reviewed-by: Gusted <gusted@noreply.codeberg.org>
Co-authored-by: Christoph Mewes <christoph@kubermatic.com>
Co-committed-by: Christoph Mewes <christoph@kubermatic.com>
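For reference, a sweep like this can be reproduced with the typos CLI from the crate-ci/typos project. A minimal sketch, assuming a Rust toolchain is available; exact flags may differ between versions:

```sh
# Install the checker (the crate providing the `typos` binary is typos-cli).
cargo install typos-cli

# Dry run: list suspected typos without touching any files.
typos

# Apply the fixes in place, then review the resulting diff before committing.
typos --write-changes
git diff
```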
This commit is contained in:
parent d934e0c9fb, commit 023a894677
87 changed files with 174 additions and 174 deletions
@@ -568,7 +568,7 @@ Forgejo or set your environment appropriately.`, "")
hookOptions.RefFullNames = make([]git.RefName, 0, hookBatchSize)

for {
-// note: pktLineTypeUnknow means pktLineTypeFlush and pktLineTypeData all allowed
+// note: pktLineTypeUnknown means pktLineTypeFlush and pktLineTypeData all allowed
rs, err = readPktLine(ctx, reader, pktLineTypeUnknown)
if err != nil {
return err
@@ -24,10 +24,10 @@ func runSendMail(ctx context.Context, c *cli.Command) error {
}

subject := c.String("title")
-confirmSkiped := c.Bool("force")
+confirmSkipped := c.Bool("force")
body := c.String("content")

-if !confirmSkiped {
+if !confirmSkipped {
if len(body) == 0 {
fmt.Print("warning: Content is empty")
}
@@ -314,11 +314,11 @@ func GetRunsNotDoneByRepoIDAndPullRequestID(ctx context.Context, repoID, pullReq
// The title will be cut off at 255 characters if it's longer than 255 characters.
// We don't have to send the ActionRunNowDone notification here because there are no runs that start in a not done status.
func InsertRun(ctx context.Context, run *ActionRun, jobs []*jobparser.SingleWorkflow) error {
-ctx, commiter, err := db.TxContext(ctx)
+ctx, committer, err := db.TxContext(ctx)
if err != nil {
return err
}
-defer commiter.Close()
+defer committer.Close()

index, err := db.GetNextResourceIndex(ctx, "action_run_index", run.RepoID)
if err != nil {
@@ -345,7 +345,7 @@ func InsertRun(ctx context.Context, run *ActionRun, jobs []*jobparser.SingleWork
return err
}

-return commiter.Commit()
+return committer.Commit()
}

// Adds `ActionRunJob` instances from `SingleWorkflows` to an existing ActionRun.
@@ -321,11 +321,11 @@ func GetAvailableJobsForRunner(e db.Engine, runner *ActionRunner) ([]*ActionRunJ
}

func CreateTaskForRunner(ctx context.Context, runner *ActionRunner) (*ActionTask, bool, error) {
-ctx, commiter, err := db.TxContext(ctx)
+ctx, committer, err := db.TxContext(ctx)
if err != nil {
return nil, false, err
}
-defer commiter.Close()
+defer committer.Close()

e := db.GetEngine(ctx)
@@ -414,7 +414,7 @@ func CreateTaskForRunner(ctx context.Context, runner *ActionRunner) (*ActionTask

task.Job = job

-if err := commiter.Commit(); err != nil {
+if err := committer.Commit(); err != nil {
return nil, false, err
}
@@ -73,11 +73,11 @@ func increaseTasksVersionByScope(ctx context.Context, ownerID, repoID int64) err
}

func IncreaseTaskVersion(ctx context.Context, ownerID, repoID int64) error {
-ctx, commiter, err := db.TxContext(ctx)
+ctx, committer, err := db.TxContext(ctx)
if err != nil {
return err
}
-defer commiter.Close()
+defer committer.Close()

// 1. increase global
if err := increaseTasksVersionByScope(ctx, 0, 0); err != nil {
@@ -101,5 +101,5 @@ func IncreaseTaskVersion(ctx context.Context, ownerID, repoID int64) error {
}
}

-return commiter.Commit()
+return committer.Commit()
}
@@ -24,14 +24,14 @@ func TestParseCommitWithSSHSignature(t *testing.T) {
user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
sshKey := unittest.AssertExistsAndLoadBean(t, &PublicKey{ID: 1000, OwnerID: 2})

-t.Run("No commiter", func(t *testing.T) {
+t.Run("No committer", func(t *testing.T) {
o := commitToGitObject(&git.Commit{})
commitVerification := ParseObjectWithSSHSignature(db.DefaultContext, &o, &user_model.User{})
assert.False(t, commitVerification.Verified)
assert.Equal(t, NoKeyFound, commitVerification.Reason)
})

-t.Run("Commiter without keys", func(t *testing.T) {
+t.Run("Committer without keys", func(t *testing.T) {
user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})

o := commitToGitObject(&git.Commit{Committer: &git.Signature{Email: user.Email}})
@@ -20,8 +20,8 @@ type ResourceIndex struct {
}

var (
-// ErrResouceOutdated represents an error when request resource outdated
-ErrResouceOutdated = errors.New("resource outdated")
+// ErrResourceOutdated represents an error when request resource outdated
+ErrResourceOutdated = errors.New("resource outdated")
// ErrGetResourceIndexFailed represents an error when resource index retries 3 times
ErrGetResourceIndexFailed = errors.New("get resource index failed")
)
@@ -44,7 +44,7 @@ func TestRegisterMigration(t *testing.T) {
"v99b_neat_migration.go", // no leading path
"vb_neat_migration.go", // no version number
"v12_neat_migration.go", // no migration group letter
-"v12a-neat-migration.go", // no undescore
+"v12a-neat-migration.go", // no underscore
"v12a.go", // no descriptive identifier
} {
t.Run(fmt.Sprintf("bad name - %s", fn), func(t *testing.T) {
@@ -52,7 +52,7 @@ func addFederatedUserActivityTables(x *xorm.Engine) {
FollowingUserID int64 `xorm:"NOT NULL unique(fuf_rel)"`
}

-// Add InboxPath to FederatedUser & add index fo UserID
+// Add InboxPath to FederatedUser & add index to UserID
type FederatedUser struct {
ID int64 `xorm:"pk autoincr"`
UserID int64 `xorm:"NOT NULL INDEX user_id"`
@@ -115,7 +115,7 @@ func TestFindRenamedBranch(t *testing.T) {
assert.True(t, exist)
assert.Equal(t, "master", branch.To)

-_, exist, err = git_model.FindRenamedBranch(db.DefaultContext, 1, "unknow")
+_, exist, err = git_model.FindRenamedBranch(db.DefaultContext, 1, "unknown")
require.NoError(t, err)
assert.False(t, exist)
}
@@ -50,7 +50,7 @@ func FindRepoProtectedBranchRules(ctx context.Context, repoID int64) (ProtectedB
func FindAllMatchedBranches(ctx context.Context, repoID int64, ruleName string) ([]string, error) {
results := make([]string, 0, 10)
for page := 1; ; page++ {
-brancheNames, err := FindBranchNames(ctx, FindBranchOptions{
+branchNames, err := FindBranchNames(ctx, FindBranchOptions{
ListOptions: db.ListOptions{
PageSize: 100,
Page: page,
@@ -63,12 +63,12 @@ func FindAllMatchedBranches(ctx context.Context, repoID int64, ruleName string)
}
rule := glob.MustCompile(ruleName)

-for _, branch := range brancheNames {
+for _, branch := range branchNames {
if rule.Match(branch) {
results = append(results, branch)
}
}
-if len(brancheNames) < 100 {
+if len(branchNames) < 100 {
break
}
}
@@ -126,7 +126,7 @@ func prepareMigrationTasks() []*migration {

newMigration(102, "update migration repositories' service type", v1_11.DropColumnHeadUserNameOnPullRequest),
newMigration(103, "Add WhitelistDeployKeys to protected branch", v1_11.AddWhitelistDeployKeysToBranches),
-newMigration(104, "remove unnecessary columns from label", v1_11.RemoveLabelUneededCols),
+newMigration(104, "remove unnecessary columns from label", v1_11.RemoveLabelUnneededCols),
newMigration(105, "add includes_all_repositories to teams", v1_11.AddTeamIncludesAllRepositories),
newMigration(106, "add column `mode` to table watch", v1_11.AddModeColumnToWatch),
newMigration(107, "Add template options to repository", v1_11.AddTemplateToRepo),
@@ -323,7 +323,7 @@ func prepareMigrationTasks() []*migration {
newMigration(268, "Update Action Ref", v1_21.UpdateActionsRefIndex),
newMigration(269, "Drop deleted branch table", v1_21.DropDeletedBranchTable),
newMigration(270, "Fix PackageProperty typo", v1_21.FixPackagePropertyTypo),
-newMigration(271, "Allow archiving labels", v1_21.AddArchivedUnixColumInLabelTable),
+newMigration(271, "Allow archiving labels", v1_21.AddArchivedUnixColumnInLabelTable),
newMigration(272, "Add Version to ActionRun table", v1_21.AddVersionToActionRunTable),
newMigration(273, "Add Action Schedule Table", v1_21.AddActionScheduleTable),
newMigration(274, "Add Actions artifacts expiration date", v1_21.AddExpiredUnixColumnInActionArtifactTable),
@@ -9,7 +9,7 @@ import (
"xorm.io/xorm"
)

-func RemoveLabelUneededCols(x *xorm.Engine) error {
+func RemoveLabelUnneededCols(x *xorm.Engine) error {
// Make sure the columns exist before dropping them
type Label struct {
QueryString string
@@ -410,7 +410,7 @@ func AddBranchProtectionCanPushAndEnableWhitelist(x *xorm.Engine) error {

official, err := isOfficialReviewer(sess, review.IssueID, reviewer)
if err != nil {
-// Branch might not be proteced or other error, ignore it.
+// Branch might not be protected or other error, ignore it.
continue
}
review.Official = official
@@ -24,7 +24,7 @@ func AddIssueResourceIndexTable(x *xorm.Engine) error {
return err
}

-// Remove data we're goint to rebuild
+// Remove data we're going to rebuild
if _, err := sess.Table("issue_index").Where("1=1").Delete(&ResourceIndex{}); err != nil {
return err
}
@@ -9,7 +9,7 @@ import (
"xorm.io/xorm"
)

-func AddArchivedUnixColumInLabelTable(x *xorm.Engine) error {
+func AddArchivedUnixColumnInLabelTable(x *xorm.Engine) error {
type Label struct {
ArchivedUnix timeutil.TimeStamp `xorm:"DEFAULT NULL"`
}
@@ -116,7 +116,7 @@ func removeAllRepositories(ctx context.Context, t *organization.Team) (err error
return err
}

-// Remove watches from all users and now unaccessible repos
+// Remove watches from all users and now inaccessible repos
for _, user := range t.Members {
has, err := access_model.HasAccess(ctx, user.ID, repo)
if err != nil {
@@ -480,12 +480,12 @@ func removeTeamMember(ctx context.Context, team *organization.Team, userID int64
return err
}

-// Remove watches from now unaccessible
+// Remove watches from now inaccessible
if err := ReconsiderWatches(ctx, repo, userID); err != nil {
return err
}

-// Remove issue assignments from now unaccessible
+// Remove issue assignments from now inaccessible
if err := ReconsiderRepoIssuesAssignee(ctx, repo, userID); err != nil {
return err
}
@@ -191,7 +191,7 @@ func (org *Organization) IsGhost() bool {
return org.AsUser().IsGhost()
}

-// FindOrgMembersOpts represensts find org members conditions
+// FindOrgMembersOpts represents find org members conditions
type FindOrgMembersOpts struct {
db.ListOptions
Doer *user_model.User
@@ -175,11 +175,11 @@ func CreatePendingRepositoryTransfer(ctx context.Context, doer, newOwner *user_m
}

// GetPendingTransfers returns the pending transfers of recipient which were sent by by doer.
-func GetPendingTransferIDs(ctx context.Context, reciepientID, doerID int64) ([]int64, error) {
+func GetPendingTransferIDs(ctx context.Context, recipientID, doerID int64) ([]int64, error) {
pendingTransferIDs := make([]int64, 0, 8)
return pendingTransferIDs, db.GetEngine(ctx).Table("repo_transfer").
Where("doer_id = ?", doerID).
-And("recipient_id = ?", reciepientID).
+And("recipient_id = ?", recipientID).
Cols("id").
Find(&pendingTransferIDs)
}
@@ -17,10 +17,10 @@ import (
func TestGetPendingTransferIDs(t *testing.T) {
require.NoError(t, unittest.PrepareTestDatabase())
doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 3})
-reciepient := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
-pendingTransfer := unittest.AssertExistsAndLoadBean(t, &RepoTransfer{RecipientID: reciepient.ID, DoerID: doer.ID})
+recipient := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+pendingTransfer := unittest.AssertExistsAndLoadBean(t, &RepoTransfer{RecipientID: recipient.ID, DoerID: doer.ID})

-pendingTransferIDs, err := GetPendingTransferIDs(db.DefaultContext, reciepient.ID, doer.ID)
+pendingTransferIDs, err := GetPendingTransferIDs(db.DefaultContext, recipient.ID, doer.ID)
require.NoError(t, err)
if assert.Len(t, pendingTransferIDs, 1) {
assert.Equal(t, pendingTransfer.ID, pendingTransferIDs[0])
@@ -431,7 +431,7 @@ func AllUnitKeyNames() []string {
return res
}

-// MinUnitAccessMode returns the minial permission of the permission map
+// MinUnitAccessMode returns the minimal permission of the permission map
func MinUnitAccessMode(unitsMap map[Type]perm.AccessMode) perm.AccessMode {
res := perm.AccessModeNone
for t, mode := range unitsMap {
@@ -440,7 +440,7 @@ func MinUnitAccessMode(unitsMap map[Type]perm.AccessMode) perm.AccessMode {
continue
}

-// get the minial permission great than AccessModeNone except all are AccessModeNone
+// get the minimal permission greater than AccessModeNone except all are AccessModeNone
if mode > perm.AccessModeNone && (res == perm.AccessModeNone || mode < res) {
res = mode
}
@@ -14,7 +14,7 @@ const DefaultHashAlgorithmName = "pbkdf2_hi"

var DefaultHashAlgorithm *PasswordHashAlgorithm

-// aliasAlgorithNames provides a mapping between the value of PASSWORD_HASH_ALGO
+// aliasAlgorithmNames provides a mapping between the value of PASSWORD_HASH_ALGO
// configured in the app.ini and the parameters used within the hashers internally.
//
// If it is necessary to change the default parameters for any hasher in future you
modules/cache/mutex_map_test.go (vendored, 2 changes)
@@ -57,7 +57,7 @@ func TestMutexMap_DifferentKeys(t *testing.T) {
done := make(chan bool, 1)

go func() {
-// If these somehow refered to the same underlying `sync.Mutex`, because `sync.Mutex` is not re-entrant this would
+// If these somehow referred to the same underlying `sync.Mutex`, because `sync.Mutex` is not re-entrant this would
// never complete.
unlock1 := mm.Lock("test-key-1")
unlock2 := mm.Lock("test-key-2")
@@ -30,7 +30,7 @@ const (
WikiContext escapeContext = "wiki"
// Rendered content (except markup), source code and blames.
FileviewContext escapeContext = "file-view"
-// Commits or pull requet's diff.
+// Commits or pull request's diff.
DiffContext escapeContext = "diff"
)
@@ -100,7 +100,7 @@ func GetRepoRawDiffForFile(repo *Repository, startCommit, endCommit string, diff
}

// ParseDiffHunkString parse the diffhunk content and return
-func ParseDiffHunkString(diffhunk string) (leftLine, leftHunk, rightLine, righHunk int) {
+func ParseDiffHunkString(diffhunk string) (leftLine, leftHunk, rightLine, rightHunk int) {
ss := strings.Split(diffhunk, "@@")
ranges := strings.Split(ss[1][1:], " ")
leftRange := strings.Split(ranges[0], ",")
@@ -112,14 +112,14 @@ func ParseDiffHunkString(diffhunk string) (leftLine, leftHunk, rightLine, righHu
rightRange := strings.Split(ranges[1], ",")
rightLine, _ = strconv.Atoi(rightRange[0])
if len(rightRange) > 1 {
-righHunk, _ = strconv.Atoi(rightRange[1])
+rightHunk, _ = strconv.Atoi(rightRange[1])
}
} else {
log.Debug("Parse line number failed: %v", diffhunk)
rightLine = leftLine
-righHunk = leftHunk
+rightHunk = leftHunk
}
-return leftLine, leftHunk, rightLine, righHunk
+return leftLine, leftHunk, rightLine, rightHunk
}

// Example: @@ -1,8 +1,9 @@ => [..., 1, 8, 1, 9]
@@ -247,7 +247,7 @@ func TestCheckIfDiffDiffers(t *testing.T) {
require.NoError(t, NewCommand(t.Context(), "switch", "-c", "e-2").Run(&RunOpts{Dir: tmpDir}))
require.NoError(t, NewCommand(t.Context(), "rebase", "main-D-2").Run(&RunOpts{Dir: tmpDir}))

-// The diff changed, because it no longers shows the change made to `README`.
+// The diff changed, because it no longer shows the change made to `README`.
changed, err := gitRepo.CheckIfDiffDiffers("main-D-2", "e-1", "e-2", nil)
require.NoError(t, err)
assert.False(t, changed) // This should be true.
@@ -30,7 +30,7 @@ index d8e4c92..19dc8ad 100644
@@ -1,9 +1,10 @@
--some comment
--- some comment 5
-+--some coment 2
++--some comment 2
+-- some comment 3
create or replace procedure test(p1 varchar2)
is
@@ -135,7 +135,7 @@ func TestCutDiffAroundLine(t *testing.T) {
@@ -1,9 +1,10 @@
--some comment
--- some comment 5
-+--some coment 2`
++--some comment 2`
assert.Equal(t, expected, minusDiff)

// Handle minus diffs properly
@@ -148,7 +148,7 @@ func TestCutDiffAroundLine(t *testing.T) {
@@ -1,9 +1,10 @@
--some comment
--- some comment 5
-+--some coment 2
++--some comment 2
+-- some comment 3`

assert.Equal(t, expected, minusDiff)
@@ -72,7 +72,7 @@ func (f Format) Parser(r io.Reader) *Parser {
return NewParser(r, f)
}

-// hexEscaped produces hex-escpaed characters from a string. For example, "\n\0"
+// hexEscaped produces hex-escaped characters from a string. For example, "\n\0"
// would turn into "%0a%00".
func (f Format) hexEscaped(delim []byte) string {
escaped := ""
@@ -473,7 +473,7 @@ func (repo *Repository) GetCommitsFromIDs(commitIDs []string, ignoreExistence bo
// It's entirely possible the commit no longer exists, we only care
// about the status and verification. Verification is no longer possible,
// but getting the status is still possible with just the ID. We do have
-// to assumme the commitID is not shortened, we cannot recover the full
+// to assume the commitID is not shortened, we cannot recover the full
// commitID.
id, err := NewIDFromString(commitID)
if err == nil {
@@ -151,7 +151,7 @@ func ColumnAndID(column string, id int64) []byte {
// it's not bound to a particular table. The table should be part of the context
// that the key was derived for, in which case it binds through that. Use this
// over `ColumnAndID` if you're encrypting data that's stored inside JSON.
-// jsonSelector must be a unambigous selector to the JSON field that stores the
+// jsonSelector must be a unambiguous selector to the JSON field that stores the
// encrypted data.
func ColumnAndJSONSelectorAndID(column, jsonSelector string, id int64) []byte {
return binary.BigEndian.AppendUint64(append(append([]byte(column), ':'), append([]byte(jsonSelector), ':')...), uint64(id))
@@ -136,11 +136,11 @@ func getStorageSectionByType(rootCfg ConfigProvider, typ string) (ConfigSection,
targetType := targetSec.Key("STORAGE_TYPE").String()
if targetType == "" {
if !IsValidStorageType(StorageType(typ)) {
-return nil, 0, fmt.Errorf("unknow storage type %q", typ)
+return nil, 0, fmt.Errorf("unknown storage type %q", typ)
}
targetSec.Key("STORAGE_TYPE").SetValue(typ)
} else if !IsValidStorageType(StorageType(targetType)) {
-return nil, 0, fmt.Errorf("unknow storage type %q for section storage.%v", targetType, typ)
+return nil, 0, fmt.Errorf("unknown storage type %q for section storage.%v", targetType, typ)
}

return targetSec, targetSecIsTyp, nil
@@ -166,7 +166,7 @@ func getStorageTargetSection(rootCfg ConfigProvider, name, typ string, sec Confi
}
}

-// check stoarge name thirdly
+// check storage name thirdly
targetSec, _ := rootCfg.GetSection(storageSectionName + "." + name)
if targetSec != nil {
targetType := targetSec.Key("STORAGE_TYPE").String()
@@ -17,7 +17,7 @@ import (
type StateType string

const (
-// StateOpen pr is opend
+// StateOpen pr is opened
StateOpen StateType = "open"
// StateClosed pr is closed
StateClosed StateType = "closed"
@@ -54,7 +54,7 @@ func TestApostrophesInMentions(t *testing.T) {
assert.Equal(t, template.HTML("<p><a href=\"/mention-user\" class=\"mention\" rel=\"nofollow\">@mention-user</a>'s comment</p>\n"), rendered)
}

-func TestNonExistantUserMention(t *testing.T) {
+func TestNonExistentUserMention(t *testing.T) {
rendered := RenderMarkdownToHtml(t.Context(), "@ThisUserDoesNotExist @mention-user")
assert.Equal(t, template.HTML("<p>@ThisUserDoesNotExist <a href=\"/mention-user\" class=\"mention\" rel=\"nofollow\">@mention-user</a></p>\n"), rendered)
}
@@ -34,9 +34,9 @@ type Validateable interface {
}

func IsValid(v Validateable) (bool, error) {
-if valdationErrors := v.Validate(); len(valdationErrors) > 0 {
+if validationErrors := v.Validate(); len(validationErrors) > 0 {
typeof := reflect.TypeOf(v)
-errString := strings.Join(valdationErrors, "\n")
+errString := strings.Join(validationErrors, "\n")
return false, ErrNotValid{fmt.Sprint(typeof, ": ", errString)}
}
@@ -112,7 +112,7 @@ func ArtifactsRoutes(prefix string) *web.Route {

m.Group(artifactRouteBase, func() {
// retrieve, list and confirm artifacts
-m.Combo("").Get(r.listArtifacts).Post(r.getUploadArtifactURL).Patch(r.comfirmUploadArtifact)
+m.Combo("").Get(r.listArtifacts).Post(r.getUploadArtifactURL).Patch(r.confirmUploadArtifact)
// handle container artifacts list and download
m.Put("/{artifact_hash}/upload", r.uploadArtifact)
// handle artifacts download
@@ -310,9 +310,9 @@ func (ar artifactRoutes) uploadArtifact(ctx *ArtifactContext) {
})
}

-// comfirmUploadArtifact confirm upload artifact.
+// confirmUploadArtifact confirm upload artifact.
// if all chunks are uploaded, merge them to one file.
-func (ar artifactRoutes) comfirmUploadArtifact(ctx *ArtifactContext) {
+func (ar artifactRoutes) confirmUploadArtifact(ctx *ArtifactContext) {
_, runID, ok := validateRunID(ctx)
if !ok {
return
@@ -271,12 +271,12 @@ func (r *artifactV4Routes) createArtifact(ctx *ArtifactContext) {

artifactName := req.Name

-rententionDays := setting.Actions.ArtifactRetentionDays
+retentionDays := setting.Actions.ArtifactRetentionDays
if req.ExpiresAt != nil {
-rententionDays = int64(time.Until(req.ExpiresAt.AsTime()).Hours() / 24)
+retentionDays = int64(time.Until(req.ExpiresAt.AsTime()).Hours() / 24)
}
// create or get artifact with name and path
-artifact, err := actions.CreateArtifact(ctx, ctx.ActionTask, artifactName, artifactName+".zip", rententionDays)
+artifact, err := actions.CreateArtifact(ctx, ctx.ActionTask, artifactName, artifactName+".zip", retentionDays)
if err != nil {
log.Error("Error create or get artifact: %v", err)
ctx.Error(http.StatusInternalServerError, "Error create or get artifact")
@@ -13,7 +13,7 @@ import (
webhook_service "forgejo.org/services/webhook"
)

-// ListHooks list an organziation's webhooks
+// ListHooks list an organization's webhooks
func ListHooks(ctx *context.APIContext) {
// swagger:operation GET /orgs/{org}/hooks organization orgListHooks
// ---
@@ -357,7 +357,7 @@ func CreatePushMirror(ctx *context.APIContext, mirrorOption *api.CreatePushMirro
}

if mirrorOption.UseSSH && (mirrorOption.RemoteUsername != "" || mirrorOption.RemotePassword != "") {
-ctx.Error(http.StatusBadRequest, "CreatePushMirror", "'use_ssh' is mutually exclusive with 'remote_username' and 'remote_passoword'")
+ctx.Error(http.StatusBadRequest, "CreatePushMirror", "'use_ssh' is mutually exclusive with 'remote_username' and 'remote_password'")
return
}
@@ -34,7 +34,7 @@ func InitCodeSearchOptions(ctx *context.Context) (opts CodeSearchOptions) {
// Also sets the ctx.Data fields "CodeSearchMode" and "CodeSearchOptions"
//
// NOTE:
-// This is seperate from `InitCodeSearchOptions`
+// This is separate from `InitCodeSearchOptions`
// since this is specific the indexer and only used
// where git-grep is not available.
func CodeSearchIndexerMode(ctx *context.Context) (mode code_indexer.SearchMode) {
@@ -313,7 +313,7 @@ func NewTeamPost(ctx *context.Context) {
unitPerms := getUnitPerms(ctx.Req.Form, p)
if p < perm.AccessModeAdmin {
// if p is less than admin accessmode, then it should be general accessmode,
-// so we should calculate the minial accessmode from units accessmodes.
+// so we should calculate the minimal accessmode from units accessmodes.
p = unit_model.MinUnitAccessMode(unitPerms)
}
@@ -480,7 +480,7 @@ func EditTeamPost(ctx *context.Context) {
unitPerms := getUnitPerms(ctx.Req.Form, newAccessMode)
if newAccessMode < perm.AccessModeAdmin {
// if newAccessMode is less than admin accessmode, then it should be general accessmode,
-// so we should calculate the minial accessmode from units accessmodes.
+// so we should calculate the minimal accessmode from units accessmodes.
newAccessMode = unit_model.MinUnitAccessMode(unitPerms)
}
isAuthChanged := false
@@ -101,9 +101,9 @@ func getParentTreeFields(treePath string) (treeNames, treePaths []string) {
}

// getSelectableEmailAddresses returns which emails can be used by the user as
-// email for a Git commiter.
+// email for a Git committer.
func getSelectableEmailAddresses(ctx *context.Context) ([]*user_model.ActivatedEmailAddress, error) {
-// Retrieve emails that the user could use for commiter identity.
+// Retrieve emails that the user could use for committer identity.
commitEmails, err := user_model.GetActivatedEmailAddresses(ctx, ctx.Doer.ID)
if err != nil {
return nil, fmt.Errorf("GetActivatedEmailAddresses: %w", err)
@@ -119,7 +119,7 @@ func RemoveDependency(ctx *context.Context) {
case "blocking":
depType = issues_model.DependencyTypeBlocking
default:
-ctx.Error(http.StatusBadRequest, "GetDependecyType")
+ctx.Error(http.StatusBadRequest, "GetDependencyType")
return
}
@@ -26,7 +26,7 @@ func SetEditorconfigIfExists(ctx *context.Context) {
if err != nil && !git.IsErrNotExist(err) {
description := fmt.Sprintf("Error while getting .editorconfig file: %v", err)
if err := system_model.CreateRepositoryNotice(description); err != nil {
-ctx.ServerError("ErrCreatingReporitoryNotice", err)
+ctx.ServerError("ErrCreatingRepositoryNotice", err)
}
return
}
@@ -87,7 +87,7 @@ func NewDiffPatchPost(ctx *context.Context) {
message += "\n\n" + form.CommitMessage
}

-gitIdenitity := getGitIdentity(ctx, form.CommitMailID, tplPatchFile, &form)
+gitIdentity := getGitIdentity(ctx, form.CommitMailID, tplPatchFile, &form)
if ctx.Written() {
return
}
@@ -98,8 +98,8 @@ func NewDiffPatchPost(ctx *context.Context) {
NewBranch: branchName,
Message: message,
Content: strings.ReplaceAll(form.Content, "\r", ""),
-Author: gitIdenitity,
-Committer: gitIdenitity,
+Author: gitIdentity,
+Committer: gitIdentity,
})
if err != nil {
if git_model.IsErrBranchAlreadyExists(err) {
@@ -687,9 +687,9 @@ func SearchRepo(ctx *context.Context) {

ctx.SetTotalCountHeader(count)

-latestCommitStatuses, err := commitstatus_service.FindReposLastestCommitStatuses(ctx, repos)
+latestCommitStatuses, err := commitstatus_service.FindReposLatestCommitStatuses(ctx, repos)
if err != nil {
-log.Error("FindReposLastestCommitStatuses: %v", err)
+log.Error("FindReposLatestCommitStatuses: %v", err)
ctx.JSON(http.StatusInternalServerError, nil)
return
}
@@ -99,7 +99,7 @@ func SettingsCtxData(ctx *context.Context) {
ctx.Data["CanUseSSHMirroring"] = git.HasSSHExecutable
}

-// Units show a repositorys unit settings page
+// Units show a repository's unit settings page
func Units(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("repo.settings.units.units")
ctx.Data["PageIsRepoSettingsUnits"] = true
@@ -65,7 +65,7 @@ func TestServicesActions_transferLingeringLogs(t *testing.T) {
unittest.AssertNotExistsBean(t, &dbfs_model.DbfsMeta{ID: lingeringLogID})
}

-// third pass is happilly doing nothing
+// third pass is happily doing nothing
require.NoError(t, transferLingeringLogs(t.Context(), transferLingeringLogsOpts(now)))

// verify the tasks that are not to be garbage collected are still present
@@ -163,7 +163,7 @@ func checkJobWillRevisit(ctx context.Context, job *actions_model.ActionRunJob) (
}

func checkJobRunsOnStaticMatrixError(ctx context.Context, job *actions_model.ActionRunJob) (bool, error) {
-// If a job has a `runs-on` field that references a matrix dimension like `runs-on: ${{ matrix.platorm }}`, and
+// If a job has a `runs-on` field that references a matrix dimension like `runs-on: ${{ matrix.platform }}`, and
// `platform` is not part of the job's matrix at all, then it will be tagged as `HasIncompleteRunsOn` and will be
// blocked forever. This only applies if the matrix is static -- that is, the job isn't also tagged
// `HasIncompleteMatrix` and the matrix is yet to be fully defined.
@@ -195,11 +195,11 @@ func UpdateTaskByState(ctx context.Context, runnerID int64, state *runnerv1.Task
stepStates[v.Id] = v
}

-ctx, commiter, err := db.TxContext(ctx)
+ctx, committer, err := db.TxContext(ctx)
if err != nil {
return nil, err
}
-defer commiter.Close()
+defer committer.Close()

e := db.GetEngine(ctx)
@@ -262,7 +262,7 @@ func UpdateTaskByState(ctx context.Context, runnerID int64, state *runnerv1.Task
}
}

-if err := commiter.Commit(); err != nil {
+if err := committer.Commit(); err != nil {
return nil, err
}
@@ -59,7 +59,7 @@ func ProcReceive(ctx context.Context, repo *repo_model.Repository, gitRepo *git.

// Get the anything after the refs/for/ prefix.
baseBranchName := opts.RefFullNames[i].ForBranchName()
-curentTopicBranch := topicBranch
+currentTopicBranch := topicBranch

// If the reference was given in the format of refs/for/<target-branch>/<topic-branch>,
// where <target-branch> and <topic-branch> can contain slashes, we need to iteratively
@@ -67,14 +67,14 @@ func ProcReceive(ctx context.Context, repo *repo_model.Repository, gitRepo *git.
if !gitRepo.IsBranchExist(baseBranchName) {
for p, v := range baseBranchName {
if v == '/' && gitRepo.IsBranchExist(baseBranchName[:p]) && p != len(baseBranchName)-1 {
-curentTopicBranch = baseBranchName[p+1:]
+currentTopicBranch = baseBranchName[p+1:]
baseBranchName = baseBranchName[:p]
break
}
}
}

-if len(curentTopicBranch) == 0 {
+if len(currentTopicBranch) == 0 {
results = append(results, private.HookProcReceiveRefResult{
OriginalRef: opts.RefFullNames[i],
OldOID: opts.OldCommitIDs[i],
@@ -86,10 +86,10 @@ func ProcReceive(ctx context.Context, repo *repo_model.Repository, gitRepo *git.

// Include the user's name in the head branch, to avoid conflicts
// with other users.
-headBranch := curentTopicBranch
+headBranch := currentTopicBranch
userName := strings.ToLower(opts.UserName)
-if !strings.HasPrefix(curentTopicBranch, userName+"/") {
-headBranch = userName + "/" + curentTopicBranch
+if !strings.HasPrefix(currentTopicBranch, userName+"/") {
+headBranch = userName + "/" + currentTopicBranch
}

// Check if a AGit pull request already exist for this branch.
@@ -324,7 +324,7 @@ func (source *Source) SearchEntry(name, passwd string, directBind bool) *SearchR
}

isAttributeSSHPublicKeySet := len(strings.TrimSpace(source.AttributeSSHPublicKey)) > 0
-isAtributeAvatarSet := len(strings.TrimSpace(source.AttributeAvatar)) > 0
+isAttributeAvatarSet := len(strings.TrimSpace(source.AttributeAvatar)) > 0

attribs := []string{source.AttributeUsername, source.AttributeName, source.AttributeSurname, source.AttributeMail}
if len(strings.TrimSpace(source.UserUID)) > 0 {
@@ -333,7 +333,7 @@ func (source *Source) SearchEntry(name, passwd string, directBind bool) *SearchR
if isAttributeSSHPublicKeySet {
attribs = append(attribs, source.AttributeSSHPublicKey)
}
-if isAtributeAvatarSet {
+if isAttributeAvatarSet {
attribs = append(attribs, source.AttributeAvatar)
}
@@ -375,7 +375,7 @@ func (source *Source) SearchEntry(name, passwd string, directBind bool) *SearchR
isRestricted = checkRestricted(l, source, userDN)
}

-if isAtributeAvatarSet {
+if isAttributeAvatarSet {
Avatar = sr.Entries[0].GetRawAttributeValue(source.AttributeAvatar)
}
@@ -442,13 +442,13 @@ func (source *Source) SearchEntries() ([]*SearchResult, error) {
userFilter := fmt.Sprintf(source.Filter, "*")

isAttributeSSHPublicKeySet := len(strings.TrimSpace(source.AttributeSSHPublicKey)) > 0
-isAtributeAvatarSet := len(strings.TrimSpace(source.AttributeAvatar)) > 0
+isAttributeAvatarSet := len(strings.TrimSpace(source.AttributeAvatar)) > 0

attribs := []string{source.AttributeUsername, source.AttributeName, source.AttributeSurname, source.AttributeMail, source.UserUID}
if isAttributeSSHPublicKeySet {
attribs = append(attribs, source.AttributeSSHPublicKey)
}
-if isAtributeAvatarSet {
+if isAttributeAvatarSet {
attribs = append(attribs, source.AttributeAvatar)
}
@@ -504,7 +504,7 @@ func (source *Source) SearchEntries() ([]*SearchResult, error) {
user.SSHPublicKey = v.GetAttributeValues(source.AttributeSSHPublicKey)
}

-if isAtributeAvatarSet {
+if isAttributeAvatarSet {
user.Avatar = v.GetRawAttributeValue(source.AttributeAvatar)
}
@@ -469,7 +469,7 @@ func (ctx *APIContext) IsUserRepoWriter(unitTypes []unit.Type) bool {

// Returns true when the requests indicates that it accepts a Github response.
// This should be used to return information in the way that the Github API
-// specifies it. Avoids breaking compatability with non-Github API clients.
+// specifies it. Avoids breaking compatibility with non-Github API clients.
func (ctx *APIContext) AcceptsGithubResponse() bool {
return ctx.Req.Header.Get("Accept") == "application/vnd.github+json"
}
@@ -44,7 +44,7 @@ func garbageCollectLFSCheck(ctx context.Context, logger log.Logger, autofix bool
OlderThan: time.Now().Add(-24 * time.Hour * 7),
// We don't set the UpdatedLessRecentlyThan because we want to do a full GC
}); err != nil {
-logger.Error("Couldn't garabage collect LFS objects: %v", err)
+logger.Error("Couldn't garbage collect LFS objects: %v", err)
return err
}
@@ -195,12 +195,12 @@ func TestAbbreviatedComment(t *testing.T) {
expected: "First line of comment",
},
{
-name: "before clip boundry",
+name: "before clip boundary",
input: strings.Repeat("abc ", 50),
expected: strings.Repeat("abc ", 50),
},
{
-name: "after clip boundry",
+name: "after clip boundary",
input: strings.Repeat("abc ", 51),
expected: "abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc…",
},
@@ -176,7 +176,7 @@ func (d *DiffLine) GetExpandDirection() DiffLineExpandDirection {
}

func getDiffLineSectionInfo(treePath, line string, lastLeftIdx, lastRightIdx int) *DiffLineSectionInfo {
-leftLine, leftHunk, rightLine, righHunk := git.ParseDiffHunkString(line)
+leftLine, leftHunk, rightLine, rightHunk := git.ParseDiffHunkString(line)

return &DiffLineSectionInfo{
Path: treePath,
@@ -185,7 +185,7 @@ func getDiffLineSectionInfo(treePath, line string, lastLeftIdx, lastRightIdx int
LeftIdx: leftLine,
RightIdx: rightLine,
LeftHunkSize: leftHunk,
-RightHunkSize: righHunk,
+RightHunkSize: rightHunk,
}
}
@@ -20,10 +20,10 @@ import (
// DeleteNotPassedAssignee deletes all assignees who aren't passed via the "assignees" array
func DeleteNotPassedAssignee(ctx context.Context, issue *issues_model.Issue, doer *user_model.User, assignees []*user_model.User) (err error) {
var found bool
-oriAssignes := make([]*user_model.User, len(issue.Assignees))
-_ = copy(oriAssignes, issue.Assignees)
+oriAssignees := make([]*user_model.User, len(issue.Assignees))
+_ = copy(oriAssignees, issue.Assignees)

-for _, assignee := range oriAssignes {
+for _, assignee := range oriAssignees {
found = false
for _, alreadyAssignee := range assignees {
if assignee.ID == alreadyAssignee.ID {
@@ -230,12 +230,12 @@ func TeamReviewRequest(ctx context.Context, issue *issues_model.Issue, doer *use
return comment, teamReviewRequestNotify(ctx, issue, doer, reviewer, isAdd, comment)
}

-func ReviewRequestNotify(ctx context.Context, issue *issues_model.Issue, doer *user_model.User, reviewNotifers []*ReviewRequestNotifier) {
-for _, reviewNotifer := range reviewNotifers {
-if reviewNotifer.Reviewer != nil {
-notify_service.PullRequestReviewRequest(ctx, issue.Poster, issue, reviewNotifer.Reviewer, reviewNotifer.IsAdd, reviewNotifer.Comment)
-} else if reviewNotifer.ReviewTeam != nil {
-if err := teamReviewRequestNotify(ctx, issue, issue.Poster, reviewNotifer.ReviewTeam, reviewNotifer.IsAdd, reviewNotifer.Comment); err != nil {
+func ReviewRequestNotify(ctx context.Context, issue *issues_model.Issue, doer *user_model.User, reviewNotifiers []*ReviewRequestNotifier) {
+for _, reviewNotifier := range reviewNotifiers {
+if reviewNotifier.Reviewer != nil {
+notify_service.PullRequestReviewRequest(ctx, issue.Poster, issue, reviewNotifier.Reviewer, reviewNotifier.IsAdd, reviewNotifier.Comment)
+} else if reviewNotifier.ReviewTeam != nil {
+if err := teamReviewRequestNotify(ctx, issue, issue.Poster, reviewNotifier.ReviewTeam, reviewNotifier.IsAdd, reviewNotifier.Comment); err != nil {
log.Error("teamReviewRequestNotify: %v", err)
}
}
@@ -85,17 +85,17 @@ func ChangeTitle(ctx context.Context, issue *issues_model.Issue, doer *user_mode
return err
}

-var reviewNotifers []*ReviewRequestNotifier
+var reviewNotifiers []*ReviewRequestNotifier
if issue.IsPull && issues_model.HasWorkInProgressPrefix(oldTitle) && !issues_model.HasWorkInProgressPrefix(title) {
var err error
-reviewNotifers, err = PullRequestCodeOwnersReview(ctx, issue, issue.PullRequest)
+reviewNotifiers, err = PullRequestCodeOwnersReview(ctx, issue, issue.PullRequest)
if err != nil {
log.Error("PullRequestCodeOwnersReview: %v", err)
}
}

notify_service.IssueChangeTitle(ctx, doer, issue, oldTitle)
-ReviewRequestNotify(ctx, issue, issue.Poster, reviewNotifers)
+ReviewRequestNotify(ctx, issue, issue.Poster, reviewNotifiers)

return nil
}
@@ -225,7 +225,7 @@ func assertRepositoryEqual(t *testing.T, expected, actual *base.Repository) {

func assertReviewEqual(t *testing.T, expected, actual *base.Review) {
assert.Equal(t, expected.ID, actual.ID, "ID")
-assert.Equal(t, expected.IssueIndex, actual.IssueIndex, "IsssueIndex")
+assert.Equal(t, expected.IssueIndex, actual.IssueIndex, "IssueIndex")
assert.Equal(t, expected.ReviewerID, actual.ReviewerID, "ReviewerID")
assert.Equal(t, expected.ReviewerName, actual.ReviewerName, "ReviewerName")
assert.Equal(t, expected.Official, actual.Official, "Official")
@@ -20,11 +20,11 @@ import (

// DeleteOrganization completely and permanently deletes everything of organization.
func DeleteOrganization(ctx context.Context, org *org_model.Organization, purge bool) error {
-ctx, commiter, err := db.TxContext(ctx)
+ctx, committer, err := db.TxContext(ctx)
if err != nil {
return err
}
-defer commiter.Close()
+defer committer.Close()

if purge {
err := repo_service.DeleteOwnerRepositoriesDirectly(ctx, org.AsUser())
@@ -52,7 +52,7 @@ func DeleteOrganization(ctx context.Context, org *org_model.Organization, purge
return fmt.Errorf("DeleteOrganization: %w", err)
}

-if err := commiter.Commit(); err != nil {
+if err := committer.Commit(); err != nil {
return err
}
@@ -18,8 +18,8 @@ import (
var (
// errWriteAfterRead occurs if Write is called after a read operation
errWriteAfterRead = errors.New("write is unsupported after a read operation")
-// errOffsetMissmatch occurs if the file offset is different than the model
-errOffsetMissmatch = errors.New("offset mismatch between file and model")
+// errOffsetMismatch occurs if the file offset is different than the model
+errOffsetMismatch = errors.New("offset mismatch between file and model")
)

// BlobUploader handles chunked blob uploads
@@ -77,7 +77,7 @@ func (u *BlobUploader) Append(ctx context.Context, r io.Reader) error {
return err
}
if offset != u.BytesReceived {
-return errOffsetMissmatch
+return errOffsetMismatch
}

n, err := io.Copy(io.MultiWriter(u.file, u.MultiHasher), r)
@@ -107,7 +107,7 @@ func (t *testPatchContext) LoadHeadRevision(ctx context.Context, pr *issues_mode

// getTestPatchCtx constructs a new testpatch context for the given pull request.
// If `onBare` is true, then the context will use the base repository that does
-// not contain a working tree. Otherwise a temprorary repository is created that
+// not contain a working tree. Otherwise a temporary repository is created that
// contains a working tree.
func getTestPatchCtx(ctx context.Context, pr *issues_model.PullRequest, onBare bool) (*testPatchContext, error) {
testPatchCtx := &testPatchContext{
@@ -419,7 +419,7 @@ func MergeTree(ctx context.Context, gitRepo *git.Repository, base, ours, theirs
return treeOID, git.IsErrorExitCode(gitErr, 1), nil, nil
}

-// Remove last NULL-byte from conflicted file info, then split with NULL byte as seperator.
+// Remove last NULL-byte from conflicted file info, then split with NULL byte as separator.
return treeOID, true, strings.Split(conflictedFileInfo[:len(conflictedFileInfo)-1], "\x00"), nil
}
@@ -64,7 +64,7 @@ func NewPullRequest(ctx context.Context, repo *repo_model.Repository, issue *iss
}
defer baseGitRepo.Close()

-var reviewNotifers []*issue_service.ReviewRequestNotifier
+var reviewNotifiers []*issue_service.ReviewRequestNotifier
if err := db.WithTx(ctx, func(ctx context.Context) error {
if err := issues_model.NewPullRequest(ctx, repo, issue, labelIDs, uuids, pr); err != nil {
return err
@@ -124,7 +124,7 @@ func NewPullRequest(ctx context.Context, repo *repo_model.Repository, issue *iss
}

if !pr.IsWorkInProgress(ctx) {
-reviewNotifers, err = issue_service.PullRequestCodeOwnersReview(ctx, issue, pr)
+reviewNotifiers, err = issue_service.PullRequestCodeOwnersReview(ctx, issue, pr)
if err != nil {
return err
}
@@ -139,7 +139,7 @@ func NewPullRequest(ctx context.Context, repo *repo_model.Repository, issue *iss
}
baseGitRepo.Close() // close immediately to avoid notifications will open the repository again

-issue_service.ReviewRequestNotify(ctx, issue, issue.Poster, reviewNotifers)
+issue_service.ReviewRequestNotify(ctx, issue, issue.Poster, reviewNotifiers)

mentions, err := issues_model.FindAndUpdateIssueMentions(ctx, issue, issue.Poster, issue.Content)
if err != nil {
@@ -53,7 +53,7 @@ func getUsersByLoginName(ctx context.Context, name string) ([]*user_model.User,
// The remote user has:
//
// Type UserTypeRemoteUser
-// LogingType Remote
+// LoginType Remote
// LoginName set to the unique identifier of the originating authentication source
// LoginSource set to the Remote source that can be matched against an OAuth2 source
//
@@ -22,8 +22,8 @@ import (
shared_automerge "forgejo.org/services/shared/automerge"
)

-func getCacheKey(repoID int64, brancheName string) string {
-hashBytes := sha256.Sum256([]byte(fmt.Sprintf("%d:%s", repoID, brancheName)))
+func getCacheKey(repoID int64, branchName string) string {
+hashBytes := sha256.Sum256([]byte(fmt.Sprintf("%d:%s", repoID, branchName)))
return fmt.Sprintf("commit_status:%x", hashBytes)
}
@@ -125,8 +125,8 @@ func CreateCommitStatus(ctx context.Context, repo *repo_model.Repository, creato
return nil
}

-// FindReposLastestCommitStatuses loading repository default branch latest combined commit status with cache
-func FindReposLastestCommitStatuses(ctx context.Context, repos []*repo_model.Repository) ([]*git_model.CommitStatus, error) {
+// FindReposLatestCommitStatuses loading repository default branch latest combined commit status with cache
+func FindReposLatestCommitStatuses(ctx context.Context, repos []*repo_model.Repository) ([]*git_model.CommitStatus, error) {
if len(repos) == 0 {
return nil, nil
}
@@ -84,10 +84,10 @@ func TestTransformers(t *testing.T) {

for i, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
-tranform := defaultTransformers[i]
-assert.Equal(t, tt.name, tranform.Name)
+transform := defaultTransformers[i]
+assert.Equal(t, tt.name, transform.Name)

-got := tranform.Transform(input)
+got := transform.Transform(input)
assert.Equal(t, tt.expected, got)
})
}
@@ -53,8 +53,8 @@ func TestQueueUnique(t *testing.T) {
})

// Queue object with the same value multiple times... this test works OK with just 3 items, but with the queue
-// processing happening in tha background it's possible that multiple invocations of the registered function can
-// happen. So we'll test this by queuing a large number and ensuring that recalcs occured less -- usually much
+// processing happening in the background it's possible that multiple invocations of the registered function can
+// happen. So we'll test this by queuing a large number and ensuring that recalcs occurred less -- usually much
// less, like once or twice.
for range 300 {
safePush(t.Context(), recalcRequest{
@@ -182,7 +182,7 @@ func DeleteUser(ctx context.Context, u *user_model.User, purge bool) error {
return err
}

-hasPrincipialSSHKey, err := db.GetEngine(ctx).Where("owner_id = ? AND type = ?", u.ID, asymkey_model.KeyTypePrincipal).Table("public_key").Exist()
+hasPrincipalSSHKey, err := db.GetEngine(ctx).Where("owner_id = ? AND type = ?", u.ID, asymkey_model.KeyTypePrincipal).Table("public_key").Exist()
if err != nil {
return err
}
@@ -322,7 +322,7 @@ func DeleteUser(ctx context.Context, u *user_model.User, purge bool) error {
}
}

-if hasPrincipialSSHKey {
+if hasPrincipalSSHKey {
if err = asymkey_model.RewriteAllPrincipalKeys(ctx); err != nil {
return err
}
@@ -360,7 +360,7 @@ func TestDeleteInactiveUsers(t *testing.T) {
unittest.AssertExistsIf(t, false, oldUser)
unittest.AssertExistsIf(t, false, oldEmail)

-// User not older than a minute shouldn't be deleted and their emaill address should still exist.
+// User not older than a minute shouldn't be deleted and their email address should still exist.
unittest.AssertExistsIf(t, true, newUser)
unittest.AssertExistsIf(t, true, newEmail)
}
@@ -50,7 +50,7 @@ test('org add and remove team repositories', async ({page}) => {
await page.getByRole('button', {name: 'Add all'}).click();
await expect(page.locator('#addall-repos-modal')).toBeVisible();
await screenshot(page, page.locator('#addall-repos-modal'));
-// Addd all repositories.
+// Add all repositories.
await page.getByRole('button', {name: 'Yes'}).click();

// Check that there are three repositories.
@@ -8,7 +8,7 @@ import {screenshot} from './shared/screenshots.ts';

test.use({user: 'user2'});

-test('Migration type seleciton screen', async ({page}) => {
+test('Migration type selection screen', async ({page}) => {
await page.goto('/repo/migrate');

// For branding purposes, it is desired that `gitea-` prefixes in SVGs are
@@ -72,8 +72,8 @@ func TestGPGKeys(t *testing.T) {
t.Run("CreateInvalidGPGKey", func(t *testing.T) {
testCreateInvalidGPGKey(t, tc.makeRequest, tc.token, tc.results[4])
})
-t.Run("CreateNoneRegistredEmailGPGKey", func(t *testing.T) {
-testCreateNoneRegistredEmailGPGKey(t, tc.makeRequest, tc.token, tc.results[5])
+t.Run("CreateNoneRegisteredEmailGPGKey", func(t *testing.T) {
+testCreateNoneRegisteredEmailGPGKey(t, tc.makeRequest, tc.token, tc.results[5])
})
t.Run("CreateValidGPGKey", func(t *testing.T) {
testCreateValidGPGKey(t, tc.makeRequest, tc.token, tc.results[6])
@@ -188,7 +188,7 @@ func testCreateInvalidGPGKey(t *testing.T, makeRequest makeRequestFunc, token st
testCreateGPGKey(t, makeRequest, token, expected, "invalid_key")
}

-func testCreateNoneRegistredEmailGPGKey(t *testing.T, makeRequest makeRequestFunc, token string, expected int) {
+func testCreateNoneRegisteredEmailGPGKey(t *testing.T, makeRequest makeRequestFunc, token string, expected int) {
testCreateGPGKey(t, makeRequest, token, expected, `-----BEGIN PGP PUBLIC KEY BLOCK-----
mQENBFmGUygBCACjCNbKvMGgp0fd5vyFW9olE1CLCSyyF9gQN2hSuzmZLuAZF2Kh
@@ -202,7 +202,7 @@ func TestAPINotificationPUT(t *testing.T) {
assert.False(t, apiNL[0].Pinned)

//
-// Now nofication ID 2 is the first in the list and is unread.
+// Now notification ID 2 is the first in the list and is unread.
//
req = NewRequest(t, "GET", "/api/v1/notifications?all=true").
AddTokenAuth(token)
@@ -742,7 +742,7 @@ func TestPackageWithTwoFactor(t *testing.T) {
return passcode
}()

-url := fmt.Sprintf("/api/v1/packages/%s", normalUser.Name) // a public packge to test
+url := fmt.Sprintf("/api/v1/packages/%s", normalUser.Name) // a public package to test
req := NewRequest(t, "GET", url)
if doer != nil {
req.AddBasicAuth(doer.Name)
@@ -336,7 +336,7 @@ func TestAPIPushMirrorSSH(t *testing.T) {

var apiError api.APIError
DecodeJSON(t, resp, &apiError)
-assert.Equal(t, "'use_ssh' is mutually exclusive with 'remote_username' and 'remote_passoword'", apiError.Message)
+assert.Equal(t, "'use_ssh' is mutually exclusive with 'remote_username' and 'remote_password'", apiError.Message)
})

t.Run("SSH not available", func(t *testing.T) {
@@ -17,7 +17,7 @@ import (
"github.com/stretchr/testify/assert"
)

-func TestAPIRepoActivitiyFeeds(t *testing.T) {
+func TestAPIRepoActivityFeeds(t *testing.T) {
defer tests.PrepareTestEnv(t)()

owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
@@ -174,7 +174,7 @@ func TestAPIRepoTagDeleteProtection(t *testing.T) {
require.Equal(t, "v1.1", tags[0].Name)

// Create a tag protection rule for the repo so that `user2` cannot create/remove tags, even if they have write
-// perms to the repo... which they do becase they own it.
+// perms to the repo... which they do because they own it.
req = NewRequestWithJSON(t, "POST",
fmt.Sprintf("/api/v1/repos/%s/%s/tag_protections", user.Name, repoName),
&api.CreateTagProtectionOption{
@@ -535,10 +535,10 @@ func runTestCase(t *testing.T, testCase *requiredScopeTestCase, user *user_model
if unauthorizedLevel == auth_model.NoAccess {
continue
}
-cateogoryUnauthorizedScopes := auth_model.GetRequiredScopes(
+categoryUnauthorizedScopes := auth_model.GetRequiredScopes(
unauthorizedLevel,
category)
-unauthorizedScopes = append(unauthorizedScopes, cateogoryUnauthorizedScopes...)
+unauthorizedScopes = append(unauthorizedScopes, categoryUnauthorizedScopes...)
}

accessToken := createAPIAccessTokenWithoutCleanUp(t, "test-token", user, unauthorizedScopes)
@@ -123,28 +123,28 @@ func TestCanReadUser(t *testing.T) {
 	})
 }

-func TestUnknowUser(t *testing.T) {
+func TestUnknownUser(t *testing.T) {
 	defer tests.PrepareTestEnv(t)()

 	session := loginUser(t, "user1")
 	token := getTokenForLoggedInUser(t, session, auth_model.AccessTokenScopeReadUser, auth_model.AccessTokenScopeReadOrganization)

-	req := NewRequest(t, "GET", "/api/v1/users/unknow/orgs/org25/permissions").
+	req := NewRequest(t, "GET", "/api/v1/users/unknown/orgs/org25/permissions").
 		AddTokenAuth(token)
 	resp := MakeRequest(t, req, http.StatusNotFound)

 	var apiError api.APIError
 	DecodeJSON(t, resp, &apiError)
-	assert.Equal(t, "user redirect does not exist [name: unknow]", apiError.Message)
+	assert.Equal(t, "user redirect does not exist [name: unknown]", apiError.Message)
 }

-func TestUnknowOrganization(t *testing.T) {
+func TestUnknownOrganization(t *testing.T) {
 	defer tests.PrepareTestEnv(t)()

 	session := loginUser(t, "user1")
 	token := getTokenForLoggedInUser(t, session, auth_model.AccessTokenScopeReadUser, auth_model.AccessTokenScopeReadOrganization)

-	req := NewRequest(t, "GET", "/api/v1/users/user1/orgs/unknow/permissions").
+	req := NewRequest(t, "GET", "/api/v1/users/user1/orgs/unknown/permissions").
 		AddTokenAuth(token)
 	resp := MakeRequest(t, req, http.StatusNotFound)
 	var apiError api.APIError

@@ -136,8 +136,8 @@ func TestLTAExpiry(t *testing.T) {

 	sess := loginUserWithPasswordRemember(t, user.Name, userPassword, true)

-	ltaCookieValie := GetLTACookieValue(t, sess)
-	lookupKey, _, found := strings.Cut(ltaCookieValie, ":")
+	ltaCookieValue := GetLTACookieValue(t, sess)
+	lookupKey, _, found := strings.Cut(ltaCookieValue, ":")
 	assert.True(t, found)

 	// Ensure it's not expired.

@@ -76,7 +76,7 @@ func TestFeed(t *testing.T) {
 			data := resp.Body.String()
 			assert.Contains(t, data, `<feed xmlns="http://www.w3.org/2005/Atom"`)
assert.Contains(t, data, "This is a very long text, so lets scream together: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
-			assert.Contains(t, data, "Well, this test is short | succient | distinct.")
+			assert.Contains(t, data, "Well, this test is short | succinct | distinct.")
 		})
 		t.Run("RSS", func(t *testing.T) {
 			defer tests.PrintCurrentTest(t)()

@@ -87,7 +87,7 @@ func TestFeed(t *testing.T) {
 			data := resp.Body.String()
 			assert.Contains(t, data, `<rss version="2.0"`)
assert.Contains(t, data, "This is a very long text, so lets scream together: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
-			assert.Contains(t, data, "Well, this test is short | succient | distinct.")
+			assert.Contains(t, data, "Well, this test is short | succinct | distinct.")
 		})
 	})
 	t.Run("Branch", func(t *testing.T) {

@@ -15,4 +15,4 @@
   repo_id: 1 # public
   is_private: false
   created_unix: 1680454039
-  content: '["1","Well, this test is short | succient | distinct."]'
+  content: '["1","Well, this test is short | succinct | distinct."]'

@@ -1479,7 +1479,7 @@ func TestSignUpViaOAuthLinking2FA(t *testing.T) {
 		assert.Equal(t, "/user/webauthn", test.RedirectURL(resp))
 	})

-	t.Run("Case-insenstive username", func(t *testing.T) {
+	t.Run("Case-insensitive username", func(t *testing.T) {
 		defer mockCompleteUserAuth(func(res http.ResponseWriter, req *http.Request) (goth.User, error) {
 			return goth.User{
 				Provider: gitlabName,

@@ -1551,7 +1551,7 @@ func TestAccessTokenWithPKCE(t *testing.T) {
 		require.NoError(t, err)
 	})

-	t.Run("Incorrect code verfifier", func(t *testing.T) {
+	t.Run("Incorrect code verifier", func(t *testing.T) {
 		req := NewRequestWithValues(t, "POST", "/login/oauth/access_token", map[string]string{
 			"client_id": "ce5a1322-42a7-11ed-b878-0242ac120002",
 			"code": u.Query().Get("code"),

@@ -148,8 +148,8 @@ func TestPatchStatus(t *testing.T) {
 		t.Helper()
 		var found *issues_model.PullRequest
 		assert.Eventually(t, func() bool {
-			examplar := pr
-			found = unittest.AssertExistsAndLoadBean(t, &examplar, flow)
+			exemplar := pr
+			found = unittest.AssertExistsAndLoadBean(t, &exemplar, flow)
 			return found.Status == issues_model.PullRequestStatusConflict
 		}, time.Second*30, time.Millisecond*200)
 		return found

@@ -297,7 +297,7 @@ can buy me/us a Paulaner Spezi in return. ~sdomi, Project SERVFAIL`), 0o6
 		defer tests.PrintCurrentTest(t)()

 		require.NoError(t, git.NewCommand(t.Context(), "push", "fork", "HEAD:protected").Run(&git.RunOpts{Dir: dstPath}))
-		testPullCreateDirectly(t, session, repo.OwnerName, repo.Name, repo.DefaultBranch, forkRepo.OwnerName, forkRepo.Name, "protected", "accros repo protected")
+		testPullCreateDirectly(t, session, repo.OwnerName, repo.Name, repo.DefaultBranch, forkRepo.OwnerName, forkRepo.Name, "protected", "across repo protected")

 		test(t, unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{BaseRepoID: repo.ID, HeadRepoID: forkRepo.ID, HeadBranch: "protected"}, "flow = 0"))
 	})

@@ -339,7 +339,7 @@ can buy me/us a Paulaner Spezi in return. ~sdomi, Project SERVFAIL`), 0o6
 		defer tests.PrintCurrentTest(t)()

 		require.NoError(t, git.NewCommand(t.Context(), "push", "fork", "HEAD:ancestor").Run(&git.RunOpts{Dir: dstPath}))
-		testPullCreateDirectly(t, session, repo.OwnerName, repo.Name, "protected", forkRepo.OwnerName, forkRepo.Name, "ancestor", "accros repo ancestor")
+		testPullCreateDirectly(t, session, repo.OwnerName, repo.Name, "protected", forkRepo.OwnerName, forkRepo.Name, "ancestor", "across repo ancestor")

 		test(t, unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{BaseRepoID: repo.ID, HeadRepoID: forkRepo.ID, HeadBranch: "ancestor"}, "flow = 0"))
 	})

@@ -365,7 +365,7 @@ func TestCantMergeUnrelated(t *testing.T) {
 	require.NoError(t, err)
 	sha := strings.TrimSpace(stdout.String())

-	_, _, err = git.NewCommand(git.DefaultContext, "update-index", "--add", "--replace", "--cacheinfo").AddDynamicArguments("100644", sha, "somewher-over-the-rainbow").RunStdString(&git.RunOpts{Dir: path})
+	_, _, err = git.NewCommand(git.DefaultContext, "update-index", "--add", "--replace", "--cacheinfo").AddDynamicArguments("100644", sha, "somewhere-over-the-rainbow").RunStdString(&git.RunOpts{Dir: path})
 	require.NoError(t, err)

 	treeSha, _, err := git.NewCommand(git.DefaultContext, "write-tree").RunStdString(&git.RunOpts{Dir: path})

@@ -23,14 +23,14 @@ func TestRepoCollaborators(t *testing.T) {
 	response := session.MakeRequest(t, NewRequest(t, "GET", "/user2/repo1/settings/collaboration"), http.StatusOK)
 	page := NewHTMLParser(t, response.Body).Find(".repo-setting-content")

-	// Veirfy header
+	// Verify header
 	assert.Equal(t, "Collaborators", strings.TrimSpace(page.Find("h4").Text()))

-	// Veirfy button text
+	// Verify button text
 	page = page.Find("#repo-collab-form")
 	assert.Equal(t, "Add collaborator", strings.TrimSpace(page.Find("button.primary").Text()))

-	// Veirfy placeholder
+	// Verify placeholder
 	placeholder, exists := page.Find("#search-user-box input").Attr("placeholder")
 	assert.True(t, exists)
 	assert.Equal(t, "Search users…", placeholder)

@@ -51,7 +51,7 @@ func TestRepoCommitsWithTags(t *testing.T) {
 	})
 	assert.True(t, tagFound, "Should find v1.1 tag")

-	// 3. tags appear after the commit messsage and status indicators
+	// 3. tags appear after the commit message and status indicators
 	messageHTML, _ := messageCell.Html()
 	messageWrapperPos := strings.Index(messageHTML, "message-wrapper")
 	ellipsisButtonPos := strings.Index(messageHTML, "ellipsis-button")