summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGitLab Bot <gitlab-bot@gitlab.com>2022-05-04 09:09:02 +0000
committerGitLab Bot <gitlab-bot@gitlab.com>2022-05-04 09:09:02 +0000
commit48640cf76a1ee0cd515e259d8f3eb2de25ba01c3 (patch)
tree84719b9b1f23a396298b0774ed8920cb401426d6
parent7d4987ae65374a40ab540ad825da3c33d8bab6df (diff)
downloadgitlab-ce-48640cf76a1ee0cd515e259d8f3eb2de25ba01c3.tar.gz
Add latest changes from gitlab-org/gitlab@master
-rw-r--r--GITALY_SERVER_VERSION2
-rw-r--r--app/assets/javascripts/api/user_api.js12
-rw-r--r--app/assets/javascripts/boards/components/config_toggle.vue4
-rw-r--r--app/assets/javascripts/boards/index.js1
-rw-r--r--app/assets/javascripts/boards/stores/getters.js14
-rw-r--r--app/assets/javascripts/graphql_shared/possible_types.json4
-rw-r--r--app/assets/javascripts/lib/utils/users_cache.js11
-rw-r--r--app/assets/javascripts/runner/components/registration/registration_dropdown.vue2
-rw-r--r--app/assets/javascripts/user_popovers.js15
-rw-r--r--app/assets/javascripts/vue_shared/components/user_popover/user_popover.vue89
-rw-r--r--app/controllers/repositories/lfs_storage_controller.rb14
-rw-r--r--app/helpers/application_settings_helper.rb26
-rw-r--r--app/helpers/boards_helper.rb1
-rw-r--r--app/views/admin/application_settings/_signin.html.haml4
-rw-r--r--config/feature_flags/development/follow_in_user_popover.yml8
-rw-r--r--doc/api/graphql/reference/index.md73
-rw-r--r--doc/api/users.md3
-rw-r--r--doc/integration/jira/issues.md4
-rw-r--r--doc/topics/autodevops/stages.md123
-rw-r--r--doc/update/index.md16
-rw-r--r--doc/user/application_security/index.md3
-rw-r--r--doc/user/application_security/policies/index.md121
-rw-r--r--doc/user/application_security/security_dashboard/index.md1
-rw-r--r--doc/user/application_security/threat_monitoring/img/threat_monitoring_policy_alert_list_v14_3.pngbin17296 -> 0 bytes
-rw-r--r--doc/user/application_security/threat_monitoring/index.md52
-rw-r--r--doc/user/application_security/vulnerability_report/index.md2
-rw-r--r--doc/user/clusters/agent/work_with_agent.md48
-rw-r--r--doc/user/clusters/applications.md337
-rw-r--r--doc/user/clusters/management_project_template.md4
-rw-r--r--doc/user/infrastructure/clusters/manage/management_project_applications/apparmor.md30
-rw-r--r--doc/user/infrastructure/clusters/manage/management_project_applications/cilium.md122
-rw-r--r--doc/user/infrastructure/clusters/manage/management_project_applications/falco.md95
-rw-r--r--doc/user/infrastructure/clusters/manage/management_project_applications/fluentd.md30
-rw-r--r--doc/user/infrastructure/iac/img/terraform_list_view_v13_8.pngbin74877 -> 0 bytes
-rw-r--r--doc/user/infrastructure/iac/terraform_state.md351
-rw-r--r--doc/user/infrastructure/iac/troubleshooting.md2
-rw-r--r--doc/user/permissions.md1
-rw-r--r--doc/user/profile/personal_access_tokens.md2
-rw-r--r--doc/user/project/clusters/protect/container_host_security/index.md66
-rw-r--r--doc/user/project/clusters/protect/container_host_security/quick_start_guide.md72
-rw-r--r--doc/user/project/clusters/protect/container_network_security/index.md76
-rw-r--r--doc/user/project/clusters/protect/container_network_security/quick_start_guide.md230
-rw-r--r--doc/user/project/clusters/protect/index.md35
-rw-r--r--doc/user/project/settings/index.md21
-rw-r--r--doc/user/project/working_with_projects.md12
-rw-r--r--lib/api/entities/user.rb3
-rw-r--r--lib/gitlab/gitaly_client/repository_service.rb2
-rw-r--r--lib/gitlab/gon_helper.rb1
-rw-r--r--locale/gitlab.pot6
-rw-r--r--spec/controllers/repositories/lfs_storage_controller_spec.rb13
-rw-r--r--spec/frontend/api/user_api_spec.js50
-rw-r--r--spec/frontend/boards/stores/getters_spec.js29
-rw-r--r--spec/frontend/lib/utils/users_cache_spec.js25
-rw-r--r--spec/frontend/runner/components/registration/registration_dropdown_spec.js89
-rw-r--r--spec/frontend/user_popovers_spec.js45
-rw-r--r--spec/frontend/vue_shared/components/user_popover/user_popover_spec.js148
-rw-r--r--spec/lib/api/entities/user_spec.rb45
-rw-r--r--spec/lib/backup/manager_spec.rb619
-rw-r--r--spec/migrations/20210918201050_remove_old_pending_jobs_for_recalculate_vulnerabilities_occurrences_uuid_spec.rb3
-rw-r--r--spec/migrations/20220124130028_dedup_runner_projects_spec.rb2
-rw-r--r--spec/migrations/associate_existing_dast_builds_with_variables_spec.rb2
-rw-r--r--spec/migrations/backfill_cadence_id_for_boards_scoped_to_iteration_spec.rb1
-rw-r--r--spec/migrations/insert_ci_daily_pipeline_schedule_triggers_plan_limits_spec.rb2
-rw-r--r--spec/migrations/migrate_protected_attribute_to_pending_builds_spec.rb2
-rw-r--r--spec/migrations/retry_backfill_traversal_ids_spec.rb2
-rw-r--r--spec/requests/lfs_http_spec.rb63
-rw-r--r--spec/support/helpers/workhorse_helpers.rb6
-rw-r--r--spec/support/helpers/workhorse_lfs_helpers.rb45
-rw-r--r--workhorse/.tool-versions2
69 files changed, 1195 insertions, 2149 deletions
diff --git a/GITALY_SERVER_VERSION b/GITALY_SERVER_VERSION
index 2adf59347d7..292efacb872 100644
--- a/GITALY_SERVER_VERSION
+++ b/GITALY_SERVER_VERSION
@@ -1 +1 @@
-07557c28273b27d750771e2044179730f1cfac16
+86aa7ee82a5dd241fd7d4b33435da0a7ecad12b0
diff --git a/app/assets/javascripts/api/user_api.js b/app/assets/javascripts/api/user_api.js
index 09995fad628..c743b18d572 100644
--- a/app/assets/javascripts/api/user_api.js
+++ b/app/assets/javascripts/api/user_api.js
@@ -10,6 +10,8 @@ const USER_PATH = '/api/:version/users/:id';
const USER_STATUS_PATH = '/api/:version/users/:id/status';
const USER_PROJECTS_PATH = '/api/:version/users/:id/projects';
const USER_POST_STATUS_PATH = '/api/:version/user/status';
+const USER_FOLLOW_PATH = '/api/:version/users/:id/follow';
+const USER_UNFOLLOW_PATH = '/api/:version/users/:id/unfollow';
export function getUsers(query, options) {
const url = buildApiUrl(USERS_PATH);
@@ -69,3 +71,13 @@ export function updateUserStatus({ emoji, message, availability, clearStatusAfte
clear_status_after: clearStatusAfter,
});
}
+
+export function followUser(userId) {
+ const url = buildApiUrl(USER_FOLLOW_PATH).replace(':id', encodeURIComponent(userId));
+ return axios.post(url);
+}
+
+export function unfollowUser(userId) {
+ const url = buildApiUrl(USER_UNFOLLOW_PATH).replace(':id', encodeURIComponent(userId));
+ return axios.post(url);
+}
diff --git a/app/assets/javascripts/boards/components/config_toggle.vue b/app/assets/javascripts/boards/components/config_toggle.vue
index 4746f598ab7..7002fd44294 100644
--- a/app/assets/javascripts/boards/components/config_toggle.vue
+++ b/app/assets/javascripts/boards/components/config_toggle.vue
@@ -1,5 +1,6 @@
<script>
import { GlButton, GlModalDirective, GlTooltipDirective } from '@gitlab/ui';
+import { mapGetters } from 'vuex';
import { formType } from '~/boards/constants';
import eventHub from '~/boards/eventhub';
import { s__, __ } from '~/locale';
@@ -14,8 +15,9 @@ export default {
GlModalDirective,
},
mixins: [Tracking.mixin()],
- inject: ['canAdminList', 'hasScope'],
+ inject: ['canAdminList'],
computed: {
+ ...mapGetters(['hasScope']),
buttonText() {
return this.canAdminList ? s__('Boards|Edit board') : s__('Boards|View scope');
},
diff --git a/app/assets/javascripts/boards/index.js b/app/assets/javascripts/boards/index.js
index 77c5994b5a1..8af7da1e0aa 100644
--- a/app/assets/javascripts/boards/index.js
+++ b/app/assets/javascripts/boards/index.js
@@ -69,7 +69,6 @@ function mountBoardApp(el) {
timeTrackingLimitToHours: parseBoolean(el.dataset.timeTrackingLimitToHours),
issuableType: issuableTypes.issue,
emailsDisabled: parseBoolean(el.dataset.emailsDisabled),
- hasScope: parseBoolean(el.dataset.hasScope),
hasMissingBoards: parseBoolean(el.dataset.hasMissingBoards),
weights: el.dataset.weights ? JSON.parse(el.dataset.weights) : [],
// Permissions
diff --git a/app/assets/javascripts/boards/stores/getters.js b/app/assets/javascripts/boards/stores/getters.js
index cb31eb4b008..e1891a4d954 100644
--- a/app/assets/javascripts/boards/stores/getters.js
+++ b/app/assets/javascripts/boards/stores/getters.js
@@ -51,4 +51,18 @@ export default {
isEpicBoard: () => {
return false;
},
+
+ hasScope: (state) => {
+ const { boardConfig } = state;
+ if (boardConfig.labels?.length > 0) {
+ return true;
+ }
+ let hasScope = false;
+ ['assigneeId', 'iterationCadenceId', 'iterationId', 'milestoneId', 'weight'].forEach((attr) => {
+ if (boardConfig[attr] !== null && boardConfig[attr] !== undefined) {
+ hasScope = true;
+ }
+ });
+ return hasScope;
+ },
};
diff --git a/app/assets/javascripts/graphql_shared/possible_types.json b/app/assets/javascripts/graphql_shared/possible_types.json
index 3d6360fc4f8..7ca3f20ec1c 100644
--- a/app/assets/javascripts/graphql_shared/possible_types.json
+++ b/app/assets/javascripts/graphql_shared/possible_types.json
@@ -76,6 +76,10 @@
"Discussion",
"Note"
],
+ "SecurityPolicySource": [
+ "GroupSecurityPolicySource",
+ "ProjectSecurityPolicySource"
+ ],
"Service": [
"BaseService",
"JiraService"
diff --git a/app/assets/javascripts/lib/utils/users_cache.js b/app/assets/javascripts/lib/utils/users_cache.js
index 54f69ef8e1b..bd000bb26fe 100644
--- a/app/assets/javascripts/lib/utils/users_cache.js
+++ b/app/assets/javascripts/lib/utils/users_cache.js
@@ -35,6 +35,17 @@ class UsersCache extends Cache {
// missing catch is intentional, error handling depends on use case
}
+ updateById(userId, data) {
+ if (!this.hasData(userId)) {
+ return;
+ }
+
+ this.internalStorage[userId] = {
+ ...this.internalStorage[userId],
+ ...data,
+ };
+ }
+
retrieveStatusById(userId) {
if (this.hasData(userId) && this.get(userId).status) {
return Promise.resolve(this.get(userId).status);
diff --git a/app/assets/javascripts/runner/components/registration/registration_dropdown.vue b/app/assets/javascripts/runner/components/registration/registration_dropdown.vue
index 3fbe3c1be74..bb2a8ddf151 100644
--- a/app/assets/javascripts/runner/components/registration/registration_dropdown.vue
+++ b/app/assets/javascripts/runner/components/registration/registration_dropdown.vue
@@ -96,7 +96,7 @@ export default {
<runner-instructions-modal
v-if="instructionsModalOpened"
ref="runnerInstructionsModal"
- :registration-token="registrationToken"
+ :registration-token="currentRegistrationToken"
data-testid="runner-instructions-modal"
/>
</gl-dropdown-item>
diff --git a/app/assets/javascripts/user_popovers.js b/app/assets/javascripts/user_popovers.js
index 4413be384e5..438ae2bc1bc 100644
--- a/app/assets/javascripts/user_popovers.js
+++ b/app/assets/javascripts/user_popovers.js
@@ -32,6 +32,7 @@ const populateUserInfo = (user) => {
([userData, status]) => {
if (userData) {
Object.assign(user, {
+ id: userId,
avatarUrl: userData.avatar_url,
bot: userData.bot,
username: userData.username,
@@ -42,6 +43,7 @@ const populateUserInfo = (user) => {
websiteUrl: userData.website_url,
pronouns: userData.pronouns,
localTime: userData.local_time,
+ isFollowed: userData.is_followed,
loaded: true,
});
}
@@ -97,6 +99,7 @@ export default function addPopovers(elements = document.querySelectorAll('.js-us
bio: null,
workInformation: null,
status: null,
+ isFollowed: false,
loaded: false,
};
const renderedPopover = new UserPopoverComponent({
@@ -107,6 +110,18 @@ export default function addPopovers(elements = document.querySelectorAll('.js-us
},
});
+ const { userId } = el.dataset;
+
+ renderedPopover.$on('follow', () => {
+ UsersCache.updateById(userId, { is_followed: true });
+ user.isFollowed = true;
+ });
+
+ renderedPopover.$on('unfollow', () => {
+ UsersCache.updateById(userId, { is_followed: false });
+ user.isFollowed = false;
+ });
+
initializedPopovers.set(el, renderedPopover);
renderedPopover.$mount();
diff --git a/app/assets/javascripts/vue_shared/components/user_popover/user_popover.vue b/app/assets/javascripts/vue_shared/components/user_popover/user_popover.vue
index 2c09fa71230..01a0b134b7f 100644
--- a/app/assets/javascripts/vue_shared/components/user_popover/user_popover.vue
+++ b/app/assets/javascripts/vue_shared/components/user_popover/user_popover.vue
@@ -6,9 +6,13 @@ import {
GlIcon,
GlSafeHtmlDirective,
GlSprintf,
+ GlButton,
} from '@gitlab/ui';
+import { __ } from '~/locale';
import UserNameWithStatus from '~/sidebar/components/assignees/user_name_with_status.vue';
import { glEmojiTag } from '~/emoji';
+import createFlash from '~/flash';
+import { followUser, unfollowUser } from '~/rest_api';
import UserAvatarImage from '../user_avatar/user_avatar_image.vue';
const MAX_SKELETON_LINES = 4;
@@ -24,6 +28,7 @@ export default {
UserAvatarImage,
UserNameWithStatus,
GlSprintf,
+ GlButton,
},
directives: {
SafeHtml: GlSafeHtmlDirective,
@@ -44,6 +49,11 @@ export default {
default: 'top',
},
},
+ data() {
+ return {
+ toggleFollowLoading: false,
+ };
+ },
computed: {
statusHtml() {
if (!this.user.status) {
@@ -64,6 +74,69 @@ export default {
availabilityStatus() {
return this.user?.status?.availability || '';
},
+ isNotCurrentUser() {
+ return !this.userIsLoading && this.user.username !== gon.current_username;
+ },
+ shouldRenderToggleFollowButton() {
+ return (
+ /*
+ * We're using `gon` to access feature flag because this component
+ * gets initialized dynamically multiple times from `user_popovers.js`
+ * for each user link present on the page, and using `glFeatureFlagMixin()`
+ * doesn't inject available feature flags into the component during init.
+ */
+ gon?.features?.followInUserPopover &&
+ this.isNotCurrentUser &&
+ typeof this.user?.isFollowed !== 'undefined'
+ );
+ },
+ toggleFollowButtonText() {
+ if (this.toggleFollowLoading) return null;
+
+ return this.user?.isFollowed ? __('Unfollow') : __('Follow');
+ },
+ toggleFollowButtonVariant() {
+ return this.user?.isFollowed ? 'default' : 'confirm';
+ },
+ },
+ methods: {
+ async toggleFollow() {
+ if (this.user.isFollowed) {
+ this.unfollow();
+ } else {
+ this.follow();
+ }
+ },
+ async follow() {
+ this.toggleFollowLoading = true;
+ try {
+ await followUser(this.user.id);
+ this.$emit('follow');
+ } catch (error) {
+ createFlash({
+ message: __('An error occurred while trying to follow this user, please try again.'),
+ error,
+ captureError: true,
+ });
+ } finally {
+ this.toggleFollowLoading = false;
+ }
+ },
+ async unfollow() {
+ this.toggleFollowLoading = true;
+ try {
+ await unfollowUser(this.user.id);
+ this.$emit('unfollow');
+ } catch (error) {
+ createFlash({
+ message: __('An error occurred while trying to unfollow this user, please try again.'),
+ error,
+ captureError: true,
+ });
+ } finally {
+ this.toggleFollowLoading = false;
+ }
+ },
},
safeHtmlConfig: { ADD_TAGS: ['gl-emoji'] },
};
@@ -73,10 +146,22 @@ export default {
<!-- 200ms delay so not every mouseover triggers Popover -->
<gl-popover :target="target" :delay="200" :placement="placement" boundary="viewport">
<div class="gl-p-3 gl-line-height-normal gl-display-flex" data-testid="user-popover">
- <div class="gl-p-2 flex-shrink-1">
+ <div
+ class="gl-p-2 flex-shrink-1 gl-display-flex gl-flex-direction-column align-items-center gl-w-70p"
+ >
<user-avatar-image :img-src="user.avatarUrl" :size="64" css-classes="gl-mr-3!" />
+ <div v-if="shouldRenderToggleFollowButton" class="gl-mt-3">
+ <gl-button
+ :variant="toggleFollowButtonVariant"
+ :loading="toggleFollowLoading"
+ size="small"
+ data-testid="toggle-follow-button"
+ @click="toggleFollow"
+ >{{ toggleFollowButtonText }}</gl-button
+ >
+ </div>
</div>
- <div class="gl-p-2 gl-w-full gl-min-w-0">
+ <div class="gl-w-full gl-min-w-0">
<template v-if="userIsLoading">
<gl-skeleton-loader
:lines="$options.maxSkeletonLines"
diff --git a/app/controllers/repositories/lfs_storage_controller.rb b/app/controllers/repositories/lfs_storage_controller.rb
index 252b604dcb0..d54b51b463a 100644
--- a/app/controllers/repositories/lfs_storage_controller.rb
+++ b/app/controllers/repositories/lfs_storage_controller.rb
@@ -6,6 +6,8 @@ module Repositories
include WorkhorseRequest
include SendFileUpload
+ InvalidUploadedFile = Class.new(StandardError)
+
skip_before_action :verify_workhorse_api!, only: :download
# added here as a part of the refactor, will be removed
@@ -44,6 +46,8 @@ module Repositories
end
def upload_finalize
+ validate_uploaded_file!
+
if store_file!(oid, size)
head 200, content_type: LfsRequest::CONTENT_TYPE
else
@@ -55,6 +59,8 @@ module Repositories
render_lfs_forbidden
rescue ObjectStorage::RemoteStoreError
render_lfs_forbidden
+ rescue InvalidUploadedFile
+ render plain: 'SHA256 or size mismatch', status: :bad_request
end
private
@@ -117,5 +123,13 @@ module Repositories
lfs_object: object
)
end
+
+ def validate_uploaded_file!
+ return unless uploaded_file
+
+ if size != uploaded_file.size || oid != uploaded_file.sha256
+ raise InvalidUploadedFile
+ end
+ end
end
end
diff --git a/app/helpers/application_settings_helper.rb b/app/helpers/application_settings_helper.rb
index 456a678ce77..dbdfa0c1eab 100644
--- a/app/helpers/application_settings_helper.rb
+++ b/app/helpers/application_settings_helper.rb
@@ -97,24 +97,18 @@ module ApplicationSettingsHelper
end
end
- def oauth_providers_checkboxes
+ def oauth_providers_checkboxes(form)
button_based_providers.map do |source|
- disabled = @application_setting.disabled_oauth_sign_in_sources.include?(source.to_s)
+ checked = !@application_setting.disabled_oauth_sign_in_sources.include?(source.to_s)
name = Gitlab::Auth::OAuth::Provider.label_for(source)
- checkbox_name = 'application_setting[enabled_oauth_sign_in_sources][]'
- checkbox_id = "application_setting_enabled_oauth_sign_in_sources_#{name.parameterize(separator: '_')}"
-
- content_tag :div, class: 'form-check' do
- check_box_tag(
- checkbox_name,
- source,
- !disabled,
- autocomplete: 'off',
- id: checkbox_id,
- class: 'form-check-input'
- ) +
- label_tag(checkbox_id, name, class: 'form-check-label')
- end
+
+ form.gitlab_ui_checkbox_component(
+ :enabled_oauth_sign_in_sources,
+ name,
+ checkbox_options: { checked: checked, multiple: true, autocomplete: 'off' },
+ checked_value: source,
+ unchecked_value: nil
+ )
end
end
diff --git a/app/helpers/boards_helper.rb b/app/helpers/boards_helper.rb
index f849f36bf84..f98e70e41d8 100644
--- a/app/helpers/boards_helper.rb
+++ b/app/helpers/boards_helper.rb
@@ -25,7 +25,6 @@ module BoardsHelper
labels_manage_path: labels_manage_path,
releases_fetch_path: releases_fetch_path,
board_type: board.to_type,
- has_scope: board.scoped?.to_s,
has_missing_boards: has_missing_boards?.to_s,
multiple_boards_available: multiple_boards_available?.to_s,
board_base_url: board_base_url
diff --git a/app/views/admin/application_settings/_signin.html.haml b/app/views/admin/application_settings/_signin.html.haml
index bce210d28d3..28e0ee25a5d 100644
--- a/app/views/admin/application_settings/_signin.html.haml
+++ b/app/views/admin/application_settings/_signin.html.haml
@@ -1,4 +1,4 @@
-= form_for @application_setting, url: general_admin_application_settings_path(anchor: 'js-signin-settings'), html: { class: 'fieldset-form', id: 'signin-settings' } do |f|
+= gitlab_ui_form_for @application_setting, url: general_admin_application_settings_path(anchor: 'js-signin-settings'), html: { class: 'fieldset-form', id: 'signin-settings' } do |f|
= form_errors(@application_setting)
%fieldset
@@ -23,7 +23,7 @@
%fieldset.form-group
%legend.gl-font-base.gl-mb-3.gl-border-none.gl-font-weight-bold= _('Enabled OAuth authentication sources')
= hidden_field_tag 'application_setting[enabled_oauth_sign_in_sources][]'
- - oauth_providers_checkboxes.each do |source|
+ - oauth_providers_checkboxes(f).each do |source|
= source
.form-group
= f.label :two_factor_authentication, _('Two-factor authentication'), class: 'label-bold'
diff --git a/config/feature_flags/development/follow_in_user_popover.yml b/config/feature_flags/development/follow_in_user_popover.yml
new file mode 100644
index 00000000000..579e8507ac1
--- /dev/null
+++ b/config/feature_flags/development/follow_in_user_popover.yml
@@ -0,0 +1,8 @@
+---
+name: follow_in_user_popover
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/76050
+rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/355070
+milestone: '14.9'
+type: development
+group: group::workspace
+default_enabled: false
diff --git a/doc/api/graphql/reference/index.md b/doc/api/graphql/reference/index.md
index ac4df4f55ad..9b021a92061 100644
--- a/doc/api/graphql/reference/index.md
+++ b/doc/api/graphql/reference/index.md
@@ -11820,6 +11820,23 @@ four standard [pagination arguments](#connection-pagination-arguments):
| <a id="grouprunnerstaglist"></a>`tagList` | [`[String!]`](#string) | Filter by tags associated with the runner (comma-separated or array). |
| <a id="grouprunnerstype"></a>`type` | [`CiRunnerType`](#cirunnertype) | Filter runners by type. |
+##### `Group.scanExecutionPolicies`
+
+Scan Execution Policies of the namespace.
+
+Returns [`ScanExecutionPolicyConnection`](#scanexecutionpolicyconnection).
+
+This field returns a [connection](#connections). It accepts the
+four standard [pagination arguments](#connection-pagination-arguments):
+`before: String`, `after: String`, `first: Int`, `last: Int`.
+
+###### Arguments
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| <a id="groupscanexecutionpoliciesactionscantypes"></a>`actionScanTypes` | [`[SecurityReportTypeEnum!]`](#securityreporttypeenum) | Filters policies by the action scan type. Only these scan types are supported: `dast`, `secret_detection`, `cluster_image_scanning`, `container_scanning`, `sast`. |
+| <a id="groupscanexecutionpoliciesrelationship"></a>`relationship` | [`SecurityPolicyRelationType`](#securitypolicyrelationtype) | Filter policies by the given policy relationship. |
+
##### `Group.timelogs`
Time logged on issues and merge requests in the group and its subgroups.
@@ -11986,6 +12003,17 @@ Contains release-related statistics about a group.
| <a id="groupreleasestatsreleasescount"></a>`releasesCount` | [`Int`](#int) | Total number of releases in all descendant projects of the group. |
| <a id="groupreleasestatsreleasespercentage"></a>`releasesPercentage` | [`Int`](#int) | Percentage of the group's descendant projects that have at least one release. |
+### `GroupSecurityPolicySource`
+
+Represents the source of a security policy belonging to a group.
+
+#### Fields
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| <a id="groupsecuritypolicysourceinherited"></a>`inherited` | [`Boolean!`](#boolean) | Indicates whether this policy is inherited from parent group. |
+| <a id="groupsecuritypolicysourcenamespace"></a>`namespace` | [`Namespace`](#namespace) | Project the policy is associated with. |
+
### `GroupStats`
Contains statistics about a group.
@@ -13811,6 +13839,23 @@ four standard [pagination arguments](#connection-pagination-arguments):
| <a id="namespaceprojectssearch"></a>`search` | [`String`](#string) | Search project with most similar names or paths. |
| <a id="namespaceprojectssort"></a>`sort` | [`NamespaceProjectSort`](#namespaceprojectsort) | Sort projects by this criteria. |
+##### `Namespace.scanExecutionPolicies`
+
+Scan Execution Policies of the namespace.
+
+Returns [`ScanExecutionPolicyConnection`](#scanexecutionpolicyconnection).
+
+This field returns a [connection](#connections). It accepts the
+four standard [pagination arguments](#connection-pagination-arguments):
+`before: String`, `after: String`, `first: Int`, `last: Int`.
+
+###### Arguments
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| <a id="namespacescanexecutionpoliciesactionscantypes"></a>`actionScanTypes` | [`[SecurityReportTypeEnum!]`](#securityreporttypeenum) | Filters policies by the action scan type. Only these scan types are supported: `dast`, `secret_detection`, `cluster_image_scanning`, `container_scanning`, `sast`. |
+| <a id="namespacescanexecutionpoliciesrelationship"></a>`relationship` | [`SecurityPolicyRelationType`](#securitypolicyrelationtype) | Filter policies by the given policy relationship. |
+
### `NetworkPolicy`
Represents the network policy.
@@ -15265,6 +15310,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="projectscanexecutionpoliciesactionscantypes"></a>`actionScanTypes` | [`[SecurityReportTypeEnum!]`](#securityreporttypeenum) | Filters policies by the action scan type. Only these scan types are supported: `dast`, `secret_detection`, `cluster_image_scanning`, `container_scanning`, `sast`. |
+| <a id="projectscanexecutionpoliciesrelationship"></a>`relationship` | [`SecurityPolicyRelationType`](#securitypolicyrelationtype) | Filter policies by the given policy relationship. |
##### `Project.securityTrainingProviders`
@@ -15543,6 +15589,16 @@ Returns [`UserMergeRequestInteraction`](#usermergerequestinteraction).
| <a id="projectpermissionsupdatewiki"></a>`updateWiki` | [`Boolean!`](#boolean) | Indicates the user can perform `update_wiki` on this resource. |
| <a id="projectpermissionsuploadfile"></a>`uploadFile` | [`Boolean!`](#boolean) | Indicates the user can perform `upload_file` on this resource. |
+### `ProjectSecurityPolicySource`
+
+Represents the source of a security policy belonging to a project.
+
+#### Fields
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| <a id="projectsecuritypolicysourceproject"></a>`project` | [`Project`](#project) | Project the policy is associated with. |
+
### `ProjectSecurityTraining`
#### Fields
@@ -16047,6 +16103,7 @@ Represents the scan execution policy.
| <a id="scanexecutionpolicydescription"></a>`description` | [`String!`](#string) | Description of the policy. |
| <a id="scanexecutionpolicyenabled"></a>`enabled` | [`Boolean!`](#boolean) | Indicates whether this policy is enabled. |
| <a id="scanexecutionpolicyname"></a>`name` | [`String!`](#string) | Name of the policy. |
+| <a id="scanexecutionpolicysource"></a>`source` | [`SecurityPolicySource!`](#securitypolicysource) | Source of the policy. Its fields depend on the source type. |
| <a id="scanexecutionpolicyupdatedat"></a>`updatedAt` | [`Time!`](#time) | Timestamp of when the policy YAML was last updated. |
| <a id="scanexecutionpolicyyaml"></a>`yaml` | [`String!`](#string) | YAML definition of the policy. |
@@ -18992,6 +19049,13 @@ The status of the security scan.
| <a id="scanstatusreport_error"></a>`REPORT_ERROR` | The report artifact provided by the CI build couldn't be parsed. |
| <a id="scanstatussucceeded"></a>`SUCCEEDED` | The report has been successfully prepared. |
+### `SecurityPolicyRelationType`
+
+| Value | Description |
+| ----- | ----------- |
+| <a id="securitypolicyrelationtypedirect"></a>`DIRECT` | Policies defined for the project only. |
+| <a id="securitypolicyrelationtypeinherited"></a>`INHERITED` | Policies defined for the project and project's ancestor groups. |
+
### `SecurityReportTypeEnum`
| Value | Description |
@@ -20061,6 +20125,15 @@ One of:
- [`NugetMetadata`](#nugetmetadata)
- [`PypiMetadata`](#pypimetadata)
+#### `SecurityPolicySource`
+
+Represents a policy source. Its fields depend on the source type.
+
+One of:
+
+- [`GroupSecurityPolicySource`](#groupsecuritypolicysource)
+- [`ProjectSecurityPolicySource`](#projectsecuritypolicysource)
+
#### `VulnerabilityDetail`
Represents a vulnerability detail field. The fields with data will depend on the vulnerability detail type.
diff --git a/doc/api/users.md b/doc/api/users.md
index 7b4962735e6..e58b28fa84c 100644
--- a/doc/api/users.md
+++ b/doc/api/users.md
@@ -309,7 +309,8 @@ Parameters:
"work_information": null,
"followers": 1,
"following": 1,
- "local_time": "3:38 PM"
+ "local_time": "3:38 PM",
+ "is_followed": false
}
```
diff --git a/doc/integration/jira/issues.md b/doc/integration/jira/issues.md
index 28998851697..aaff5de767b 100644
--- a/doc/integration/jira/issues.md
+++ b/doc/integration/jira/issues.md
@@ -138,6 +138,10 @@ of these filters:
Enhancements to use these filters through the user interface
[are planned](https://gitlab.com/groups/gitlab-org/-/epics/3622).
+## Create a Jira issue for a vulnerability **(ULTIMATE)**
+
+You can create a Jira issue for a vulnerability from a [Vulnerability Page](../../user/application_security/vulnerabilities/index.md#create-a-jira-issue-for-a-vulnerability).
+
## Automatic issue transitions
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/55773) in GitLab 13.11.
diff --git a/doc/topics/autodevops/stages.md b/doc/topics/autodevops/stages.md
index 9095944ad30..038d4daed65 100644
--- a/doc/topics/autodevops/stages.md
+++ b/doc/topics/autodevops/stages.md
@@ -551,129 +551,6 @@ workers:
terminationGracePeriodSeconds: 60
```
-### Network Policy
-
-- [Introduced](https://gitlab.com/gitlab-org/charts/auto-deploy-app/-/merge_requests/30) in GitLab 12.7.
-- [Deprecated](https://gitlab.com/gitlab-org/cluster-integration/auto-deploy-image/-/merge_requests/184) in GitLab 13.9.
-
-By default, all Kubernetes pods are
-[non-isolated](https://kubernetes.io/docs/concepts/services-networking/network-policies/#isolated-and-non-isolated-pods),
-and accept traffic to and from any source. You can use
-[NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
-to restrict connections to and from selected pods, namespaces, and the Internet.
-
-NOTE:
-You must use a Kubernetes network plugin that implements support for
-`NetworkPolicy`. The default network plugin for Kubernetes (`kubenet`)
-[does not implement](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#kubenet)
-support for it. The [Cilium](https://cilium.io/) network plugin can be
-installed as a [cluster application](../../user/project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium)
-to enable support for network policies.
-
-You can enable deployment of a network policy by setting the following
-in the `.gitlab/auto-deploy-values.yaml` file:
-
-```yaml
-networkPolicy:
- enabled: true
-```
-
-The default policy deployed by the Auto Deploy pipeline allows
-traffic within a local namespace, and from the `gitlab-managed-apps`
-namespace. All other inbound connections are blocked. Outbound
-traffic (for example, to the Internet) is not affected by the default policy.
-
-You can also provide a custom [policy specification](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
-in the `.gitlab/auto-deploy-values.yaml` file, for example:
-
-```yaml
-networkPolicy:
- enabled: true
- spec:
- podSelector:
- matchLabels:
- app.gitlab.com/env: staging
- ingress:
- - from:
- - podSelector:
- matchLabels: {}
- - namespaceSelector:
- matchLabels:
- app.gitlab.com/managed_by: gitlab
-```
-
-For more information on installing Network Policies, see
-[Use the Cluster Management Template to Install Cilium](../../user/project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium).
-
-### Cilium Network Policy
-
-> [Introduced](https://gitlab.com/gitlab-org/cluster-integration/auto-deploy-image/-/merge_requests/184) in GitLab 13.9.
-
-By default, all Kubernetes pods are
-[non-isolated](https://kubernetes.io/docs/concepts/services-networking/network-policies/#isolated-and-non-isolated-pods),
-and accept traffic to and from any source. You can use
-[CiliumNetworkPolicy](https://docs.cilium.io/en/v1.8/concepts/kubernetes/policy/#ciliumnetworkpolicy)
-to restrict connections to and from selected pods, namespaces, and the internet.
-
-#### Requirements
-
-As the default network plugin for Kubernetes (`kubenet`)
-[does not implement](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#kubenet)
-support for it, you must have [Cilium](https://docs.cilium.io/en/v1.8/intro/) as your Kubernetes network plugin.
-
-The [Cilium](https://cilium.io/) network plugin can be
-installed with a [cluster management project template](../../user/project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium)
-to enable support for network policies.
-
-#### Configuration
-
-You can enable deployment of a network policy by setting the following
-in the `.gitlab/auto-deploy-values.yaml` file:
-
-```yaml
-ciliumNetworkPolicy:
- enabled: true
-```
-
-The default policy deployed by the Auto Deploy pipeline allows
-traffic within a local namespace, and from the `gitlab-managed-apps`
-namespace. All other inbound connections are blocked. Outbound
-traffic (for example, to the internet) is not affected by the default policy.
-
-You can also provide a custom [policy specification](https://docs.cilium.io/en/v1.8/policy/language/#simple-ingress-allow)
-in the `.gitlab/auto-deploy-values.yaml` file, for example:
-
-```yaml
-ciliumNetworkPolicy:
- enabled: true
- spec:
- endpointSelector:
- matchLabels:
- app.gitlab.com/env: staging
- ingress:
- - fromEndpoints:
- - matchLabels:
- app.gitlab.com/managed_by: gitlab
-```
-
-#### Enabling Alerts
-
-You can also enable alerts. Network policies with alerts are considered only if
-the [agent](../../user/clusters/agent/index.md)
-has been integrated.
-
-You can enable alerts as follows:
-
-```yaml
-ciliumNetworkPolicy:
- enabled: true
- alerts:
- enabled: true
-```
-
-For more information on installing Network Policies, see
-[Use the Cluster Management Template to Install Cilium](../../user/project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium).
-
### Running commands in the container
Applications built with [Auto Build](#auto-build) using Herokuish, the default
diff --git a/doc/update/index.md b/doc/update/index.md
index 01f57396bbb..5e82dbfcfd6 100644
--- a/doc/update/index.md
+++ b/doc/update/index.md
@@ -192,9 +192,13 @@ pending_job_classes.each { |job_class| Gitlab::BackgroundMigration.steal(job_cla
#### Background migrations stuck in 'pending' state
GitLab 13.6 introduced an issue where a background migration named `BackfillJiraTrackerDeploymentType2` can be permanently stuck in a **pending** state across upgrades. To clean up this stuck migration, see the [13.6.0 version-specific instructions](#1360).
+
GitLab 14.4 introduced an issue where a background migration named `PopulateTopicsTotalProjectsCountCache` can be permanently stuck in a **pending** state across upgrades when the instance lacks records that match the migration's target. To clean up this stuck migration, see the [14.4.0 version-specific instructions](#1440).
+
GitLab 14.8 introduced an issue where a background migration named `PopulateTopicsNonPrivateProjectsCount` can be permanently stuck in a **pending** state across upgrades. To clean up this stuck migration, see the [14.8.0 version-specific instructions](#1480).
+
+GitLab 14.9 introduced an issue where a background migration named `ResetDuplicateCiRunnersTokenValuesOnProjects` can be permanently stuck in a **pending** state across upgrades when the instance lacks records that match the migration's target. To clean up this stuck migration, see the [14.9.0 version-specific instructions](#1490).
+
For other background migrations stuck in pending, run the following check. If it returns non-zero and the count does not decrease over time, follow the rest of the steps in this section.
```shell
@@ -415,6 +419,18 @@ and [Helm Chart deployments](https://docs.gitlab.com/charts/). They come with ap
Expected batched background migration for the given configuration to be marked as 'finished', but it is 'active':
```
+- GitLab 14.9.0 includes a
+ [background migration `ResetDuplicateCiRunnersTokenValuesOnProjects`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/79140)
+ that may remain stuck permanently in a **pending** state.
+
+ To clean up this stuck job, run the following in the [GitLab Rails Console](../administration/operations/rails_console.md):
+
+ ```ruby
+ Gitlab::Database::BackgroundMigrationJob.pending.where(class_name: "ResetDuplicateCiRunnersTokenValuesOnProjects").find_each do |job|
+ puts Gitlab::Database::BackgroundMigrationJob.mark_all_as_succeeded("ResetDuplicateCiRunnersTokenValuesOnProjects", job.arguments)
+ end
+ ```
+
### 14.8.0
- If upgrading from a version earlier than 14.6.5, 14.7.4, or 14.8.2, please review the [Critical Security Release: 14.8.2, 14.7.4, and 14.6.5](https://about.gitlab.com/releases/2022/02/25/critical-security-release-gitlab-14-8-2-released/) blog post.
diff --git a/doc/user/application_security/index.md b/doc/user/application_security/index.md
index 3a6aa8e3485..3de43ed40ea 100644
--- a/doc/user/application_security/index.md
+++ b/doc/user/application_security/index.md
@@ -19,9 +19,6 @@ GitLab also provides high-level statistics of vulnerabilities across projects an
- The [Security Dashboard](security_dashboard/index.md) provides a
high-level view of vulnerabilities detected in your projects, pipeline, and groups.
-- The [Threat Monitoring](threat_monitoring/index.md) page provides runtime security metrics
- for application environments. With the information provided,
- you can immediately begin risk analysis and remediation.
<i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
For an overview of GitLab application security, see [Shifting Security Left](https://www.youtube.com/watch?v=XnYstHObqlA&t).
diff --git a/doc/user/application_security/policies/index.md b/doc/user/application_security/policies/index.md
index 81d24104340..214e5f955bc 100644
--- a/doc/user/application_security/policies/index.md
+++ b/doc/user/application_security/policies/index.md
@@ -19,7 +19,6 @@ GitLab supports the following security policies:
- [Scan Execution Policy](scan-execution-policies.md)
- [Scan Result Policy](scan-result-policies.md)
-- [Container Network Policy](#container-network-policy) (DEPRECATED)
## Security policy project
@@ -83,21 +82,6 @@ status), and create and edit deployed policies:
![Policies List Page](img/policies_list_v14_3.png)
-Network policies are fetched directly from the selected environment's
-deployment platform while other policies are fetched from the project's
-security policy project. Changes performed outside of this tab are
-reflected upon refresh.
-
-By default, the policy list contains predefined network policies in a
-disabled state. Once enabled, a predefined policy deploys to the
-selected environment's deployment platform and you can manage it like
-the regular policies.
-
-Note that if you're using [Auto DevOps](../../../topics/autodevops/index.md)
-and change a policy in this section, your `auto-deploy-values.yaml` file doesn't update. Auto DevOps
-users must make changes by following the
-[Container Network Policy documentation](../../../topics/autodevops/stages.md#network-policy).
-
## Policy editor
> [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/3403) in GitLab 13.4.
@@ -144,111 +128,6 @@ See [Scan execution policies](scan-execution-policies.md).
See [Scan result policies](scan-result-policies.md).
-## Container Network Policy
-
-> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/32365) in GitLab 12.9.
-> - [Deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476) in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477) in GitLab 15.0.
-
-WARNING:
-Container Network Policy is in its end-of-life process. It's [deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476)
-in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477)
-in GitLab 15.0.
-
-The **Container Network Policy** section provides packet flow metrics for
-your application's Kubernetes namespace. This section has the following
-prerequisites:
-
-- Your project contains at least one [environment](../../../ci/environments/index.md).
-- You've [installed Cilium](../../project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium).
-- You've configured the [Prometheus service](../../project/integrations/prometheus.md#enabling-prometheus-integration).
-
-If you're using custom Helm values for Cilium, you must enable Hubble
-with flow metrics for each namespace by adding the following lines to
-your [Cilium values](../../project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium):
-
-```yaml
-hubble:
- enabled: true
- metrics:
- enabled:
- - 'flow:sourceContext=namespace;destinationContext=namespace'
-```
-
-The **Container Network Policy** section displays the following information
-about your packet flow:
-
-- The total amount of the inbound and outbound packets
-- The proportion of packets dropped according to the configured
- policies
-- The per-second average rate of the forwarded and dropped packets
-  accumulated over the time window for the requested time interval
-
-If a significant percentage of packets is dropped, you should
-investigate it for potential threats by
-examining the Cilium logs:
-
-```shell
-kubectl -n gitlab-managed-apps logs -l k8s-app=cilium -c cilium-monitor
-```
-
-### Change the status
-
-To change a network policy's status:
-
-- Select the network policy you want to update.
-- Select **Edit policy**.
-- Select the **Policy status** toggle to update the selected policy.
-- Select **Save changes** to deploy network policy changes.
-
-Disabled network policies have the `network-policy.gitlab.com/disabled_by: gitlab` selector inside
-the `podSelector` block. This narrows the scope of such a policy and as a result it doesn't affect
-any pods. The policy itself is still deployed to the corresponding deployment namespace.
-
-### Container Network Policy editor
-
-The policy editor only supports the [CiliumNetworkPolicy](https://docs.cilium.io/en/v1.8/policy/)
-specification. Regular Kubernetes [NetworkPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#networkpolicy-v1-networking-k8s-io)
-resources aren't supported.
-
-Rule mode supports the following rule types:
-
-- [Labels](https://docs.cilium.io/en/v1.8/policy/language/#labels-based).
-- [Entities](https://docs.cilium.io/en/v1.8/policy/language/#entities-based).
-- [IP/CIDR](https://docs.cilium.io/en/v1.8/policy/language/#ip-cidr-based). Only
- the `toCIDR` block without `except` is supported.
-- [DNS](https://docs.cilium.io/en/v1.8/policy/language/#dns-based).
-- [Level 4](https://docs.cilium.io/en/v1.8/policy/language/#layer-4-examples)
- can be added to all other rules.
-
-Once your policy is complete, save it by selecting **Save policy**
-at the bottom of the editor. Existing policies can also be
-removed from the editor interface by selecting **Delete policy**
-at the bottom of the editor.
-
-### Configure a Network Policy Alert
-
-> - [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/3438) and [enabled by default](https://gitlab.com/gitlab-org/gitlab/-/issues/287676) in GitLab 13.9.
-> - The feature flag was removed and the Threat Monitoring Alerts Project was [made generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/287676) in GitLab 14.0.
-
-You can use policy alerts to track your policy's impact. Alerts are only available if you've
-[installed](../../clusters/agent/repository.md)
-and [configured](../../clusters/agent/install/index.md#register-the-agent-with-gitlab)
-an agent for this project.
-
-There are two ways to create policy alerts:
-
-- In the [policy editor UI](#container-network-policy-editor),
- by clicking **Add alert**.
-- In the policy editor's YAML mode, through the `metadata.annotations` property:
-
- ```yaml
- metadata:
- annotations:
- app.gitlab.com/alert: 'true'
- ```
-
-Once added, the UI updates and displays a warning about the dangers of too many alerts.
-
## Roadmap
See the [Category Direction page](https://about.gitlab.com/direction/protect/security_orchestration/)
diff --git a/doc/user/application_security/security_dashboard/index.md b/doc/user/application_security/security_dashboard/index.md
index 488ec336646..577606885ca 100644
--- a/doc/user/application_security/security_dashboard/index.md
+++ b/doc/user/application_security/security_dashboard/index.md
@@ -17,6 +17,7 @@ To use the Security Dashboards, you must:
- Configure jobs to use the [`reports` syntax](../../../ci/yaml/index.md#artifactsreports).
- Use [GitLab Runner](https://docs.gitlab.com/runner/) 11.5 or later. If you use the
shared runners on GitLab.com, you are using the correct version.
+- Have the [correct role](../../permissions.md) for the project or group.
## When Security Dashboards are updated
diff --git a/doc/user/application_security/threat_monitoring/img/threat_monitoring_policy_alert_list_v14_3.png b/doc/user/application_security/threat_monitoring/img/threat_monitoring_policy_alert_list_v14_3.png
deleted file mode 100644
index a11a7fafc4a..00000000000
--- a/doc/user/application_security/threat_monitoring/img/threat_monitoring_policy_alert_list_v14_3.png
+++ /dev/null
Binary files differ
diff --git a/doc/user/application_security/threat_monitoring/index.md b/doc/user/application_security/threat_monitoring/index.md
deleted file mode 100644
index 9b8dd2825ea..00000000000
--- a/doc/user/application_security/threat_monitoring/index.md
+++ /dev/null
@@ -1,52 +0,0 @@
----
-type: reference, howto
-stage: Protect
-group: Container Security
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
----
-
-# Threat Monitoring **(ULTIMATE)**
-
-> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/14707) in GitLab 12.9.
-> - [Deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476) in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477) in GitLab 15.0.
-
-WARNING:
-Threat Monitoring is in its end-of-life process. It's [deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476)
-in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477)
-in GitLab 15.0.
-
-The **Threat Monitoring** page provides alerts and metrics
-for the GitLab application runtime security features. You can access
-these by navigating to your project's **Security & Compliance > Threat
-Monitoring** page.
-
-GitLab supports statistics for the following security features:
-
-- [Container Network Policies](../../../topics/autodevops/stages.md#network-policy)
-
-## Container Network Policy Alert list
-
-> [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/3438) in GitLab 13.9.
-
-The policy alert list displays your policy's alert activity. You can sort the list by these columns:
-
-- Date and time
-- Events
-- Status
-
-You can filter the list with the **Policy Name** filter and the **Status** filter at the top. Use
-the selector menu in the **Status** column to set the status for each alert:
-
-- Unreviewed
-- In review
-- Resolved
-- Dismissed
-
-By default, the list doesn't display resolved or dismissed alerts.
-
-![Policy Alert List](img/threat_monitoring_policy_alert_list_v14_3.png)
-
-Clicking an alert's row opens the alert drawer, which shows more information about the alert. A user
-can also create an incident from the alert and update the alert status in the alert drawer.
-
-Clicking an alert's name takes the user to the [alert details page](../../../operations/incident_management/alerts.md#alert-details-page).
diff --git a/doc/user/application_security/vulnerability_report/index.md b/doc/user/application_security/vulnerability_report/index.md
index 1e390b1a319..7cadc73e8c3 100644
--- a/doc/user/application_security/vulnerability_report/index.md
+++ b/doc/user/application_security/vulnerability_report/index.md
@@ -11,7 +11,7 @@ The Vulnerability Report provides information about vulnerabilities from scans o
The scan results from a pipeline are only ingested after all the jobs in the pipeline complete. Partial results for a pipeline with jobs in progress can be seen in the pipeline security tab.
-The report is available for projects, groups, and the Security Center.
+The report is available for users with the [correct role](../../permissions.md) on projects, groups, and the Security Center.
At all levels, the Vulnerability Report contains:
diff --git a/doc/user/clusters/agent/work_with_agent.md b/doc/user/clusters/agent/work_with_agent.md
index e8622bca0e9..8872ecf7ce5 100644
--- a/doc/user/clusters/agent/work_with_agent.md
+++ b/doc/user/clusters/agent/work_with_agent.md
@@ -161,51 +161,3 @@ To remove an agent from the UI:
```shell
kubectl delete -n gitlab-kubernetes-agent -f ./resources.yml
```
-
-## Surface network security alerts from cluster to GitLab **(ULTIMATE)**
-
-> [Deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476) in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477) in GitLab 15.0.
-
-WARNING:
-Cilium integration is in its end-of-life process. It's [deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476)
-in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477)
-in GitLab 15.0.
-
-The agent for Kubernetes also provides an integration with Cilium. This integration provides a simple way to
-generate network policy-related alerts and to surface those alerts in GitLab.
-
-Several components work in concert for the agent to generate the alerts:
-
-- A working Kubernetes cluster.
-- Cilium integration through either of these options:
- - Installation through [cluster management template](../../project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium).
- - Enablement of [hubble-relay](https://docs.cilium.io/en/v1.8/concepts/overview/#hubble) on an
- existing installation.
-- One or more network policies through any of these options:
- - Use the [Container Network Policy editor](../../application_security/policies/index.md#container-network-policy-editor) to create and manage policies.
- - Use an [AutoDevOps](../../application_security/policies/index.md#container-network-policy) configuration.
- - Add the required labels and annotations to existing network policies.
-- A configuration repository with [Cilium configured in `config.yaml`](work_with_agent.md#surface-network-security-alerts-from-cluster-to-gitlab)
-
-The setup process follows the same [agent's installation steps](install/index.md),
-with the following differences:
-
-- When you define a configuration repository, you must do so with [Cilium settings](work_with_agent.md#surface-network-security-alerts-from-cluster-to-gitlab).
-- You do not need to specify the `gitops` configuration section.
-
-To integrate, add a top-level `cilium` section to your `config.yml` file. Currently, the
-only configuration option is the Hubble relay address:
-
-```yaml
-cilium:
- hubble_relay_address: "<hubble-relay-host>:<hubble-relay-port>"
-```
-
-If your Cilium integration was performed through [GitLab Managed Apps](../applications.md#install-cilium-using-gitlab-cicd) or the
-[cluster management template](../../project/clusters/protect/container_network_security/quick_start_guide.md#use-the-cluster-management-template-to-install-cilium),
-you can use `hubble-relay.gitlab-managed-apps.svc.cluster.local:80` as the address:
-
-```yaml
-cilium:
- hubble_relay_address: "hubble-relay.gitlab-managed-apps.svc.cluster.local:80"
-```
diff --git a/doc/user/clusters/applications.md b/doc/user/clusters/applications.md
index 2bcfea50ee3..73ee156dac1 100644
--- a/doc/user/clusters/applications.md
+++ b/doc/user/clusters/applications.md
@@ -43,13 +43,10 @@ Supported applications:
- [cert-manager](#install-cert-manager-using-gitlab-cicd)
- [Sentry](#install-sentry-using-gitlab-cicd)
- [GitLab Runner](#install-gitlab-runner-using-gitlab-cicd)
-- [Cilium](#install-cilium-using-gitlab-cicd)
-- [Falco](#install-falco-using-gitlab-cicd)
- [Vault](#install-vault-using-gitlab-cicd)
- [JupyterHub](#install-jupyterhub-using-gitlab-cicd)
- [Elastic Stack](#install-elastic-stack-using-gitlab-cicd)
- [Crossplane](#install-crossplane-using-gitlab-cicd)
-- [Fluentd](#install-fluentd-using-gitlab-cicd)
- [Knative](#install-knative-using-gitlab-cicd)
- [PostHog](#install-posthog-using-gitlab-cicd)
- [Prometheus](#install-prometheus-using-gitlab-cicd)
@@ -414,222 +411,6 @@ GitLab Runner group. If you run into unknown issues,
least 2 people from the
[Runner group](https://about.gitlab.com/handbook/product/categories/#runner-group).
-### Install Cilium using GitLab CI/CD
-
-> - [Introduced](https://gitlab.com/gitlab-org/cluster-integration/cluster-applications/-/merge_requests/22) in GitLab 12.8.
-> - [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/327908) in GitLab 13.12.
-
-[Cilium](https://cilium.io/) is a networking plugin for Kubernetes that you can use to implement
-support for [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
-resources. For more information, see [Network Policies](../../topics/autodevops/stages.md#network-policy).
-
-<i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
-For an overview, see the
-[Container Network Security Demo for GitLab 12.8](https://www.youtube.com/watch?v=pgUEdhdhoUI).
-
-Enable Cilium in the `.gitlab/managed-apps/config.yaml` file to install it:
-
-```yaml
-# possible values are gke or eks
-clusterType: gke
-
-cilium:
- installed: true
-```
-
-The `clusterType` variable enables the recommended Helm variables for a corresponding cluster type.
-You can check the recommended variables for each cluster type in the official documentation:
-
-- [Google GKE](https://docs.cilium.io/en/v1.8/gettingstarted/k8s-install-gke/#deploy-cilium)
-- [AWS EKS](https://docs.cilium.io/en/v1.8/gettingstarted/k8s-install-eks/#deploy-cilium)
-
-Do not use `clusterType` for sandbox environments like [minikube](https://minikube.sigs.k8s.io/docs/).
-
-You can customize Cilium's Helm variables by defining the
-`.gitlab/managed-apps/cilium/values.yaml` file in your cluster
-management project. Refer to the
-[Cilium chart](https://github.com/cilium/cilium/tree/master/install/kubernetes/cilium)
-for the available configuration options.
-
-You can check Cilium's installation status on the cluster management page:
-
-- [Project-level cluster](../project/clusters/index.md): Navigate to your project's
- **Infrastructure > Kubernetes clusters** page.
-- [Group-level cluster](../group/clusters/index.md): Navigate to your group's
- **Kubernetes** page.
-
-WARNING:
-Installation and removal of the Cilium requires a **manual**
-[restart](https://docs.cilium.io/en/stable/gettingstarted/k8s-install-helm/#restart-unmanaged-pods)
-of all affected pods in all namespaces to ensure that they are
-[managed](https://docs.cilium.io/en/v1.8/operations/troubleshooting/#ensure-managed-pod)
-by the correct networking plugin. Whenever Hubble is enabled, its related pod might require a
-restart depending on whether it started prior to Cilium. For more information, see
-[Failed Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#failed-deployment)
-in the Kubernetes docs.
-
-NOTE:
-Major upgrades might require additional setup steps. For more information, see
-the official [upgrade guide](https://docs.cilium.io/en/v1.8/operations/upgrade/).
-
-By default, Cilium's
-[audit mode](https://docs.cilium.io/en/v1.8/gettingstarted/policy-creation/#enable-policy-audit-mode)
-is enabled. In audit mode, Cilium doesn't drop disallowed packets. You
-can use `policy-verdict` log to observe policy-related decisions. You
-can disable audit mode by adding the following to
-`.gitlab/managed-apps/cilium/values.yaml`:
-
-```yaml
-config:
- policyAuditMode: false
-
-agent:
- monitor:
- eventTypes: ["drop"] # Note: possible values are documented at https://docs.cilium.io/en/stable/cmdref/cilium_monitor/
-```
-
-The Cilium monitor log for traffic is logged out by the
-`cilium-monitor` sidecar container. You can check these logs with the following command:
-
-```shell
-kubectl -n gitlab-managed-apps logs -l k8s-app=cilium -c cilium-monitor
-```
-
-You can disable the monitor log in `.gitlab/managed-apps/cilium/values.yaml`:
-
-```yaml
-agent:
- monitor:
- enabled: false
-```
-
-The [Hubble](https://github.com/cilium/hubble) monitoring daemon is enabled by default
-and it's set to collect per namespace flow metrics. These metrics are accessible on the
-[Threat Monitoring](../application_security/threat_monitoring/index.md)
-dashboard. You can disable Hubble by adding the following to
-`.gitlab/managed-apps/cilium/values.yaml`:
-
-```yaml
-global:
- hubble:
- enabled: false
-```
-
-You can also adjust Helm values for Hubble by using
-`.gitlab/managed-apps/cilium/values.yaml`:
-
-```yaml
-global:
- hubble:
- enabled: true
- metrics:
- enabled:
- - 'flow:sourceContext=namespace;destinationContext=namespace'
-```
-
-Support for installing the Cilium managed application is provided by the
-GitLab Container Security group. If you run into unknown issues,
-[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping at
-least 2 people from the
-[Container Security group](https://about.gitlab.com/handbook/product/categories/#container-security-group).
-
-### Install Falco using GitLab CI/CD
-
-> - [Introduced](https://gitlab.com/gitlab-org/cluster-integration/cluster-applications/-/merge_requests/91) in GitLab 13.1.
-> - [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/327908) in GitLab 13.12.
-
-GitLab Container Host Security Monitoring uses [Falco](https://falco.org/)
-as a runtime security tool that listens to the Linux kernel using eBPF. Falco parses system calls
-and asserts the stream against a configurable rules engine in real-time. For more information, see
-[Falco's Documentation](https://falco.org/docs/).
-
-You can enable Falco in the
-`.gitlab/managed-apps/config.yaml` file:
-
-```yaml
-falco:
- installed: true
-```
-
-You can customize Falco's Helm variables by defining the
-`.gitlab/managed-apps/falco/values.yaml` file in your cluster
-management project. Refer to the
-[Falco chart](https://github.com/falcosecurity/charts/tree/master/falco)
-for the available configuration options.
-
-WARNING:
-By default eBPF support is enabled and Falco uses an
-[eBPF probe](https://falco.org/docs/event-sources/drivers/#using-the-ebpf-probe)
-to pass system calls to user space. If your cluster doesn't support this, you can
-configure it to use Falco kernel module instead by adding the following to
-`.gitlab/managed-apps/falco/values.yaml`:
-
-```yaml
-ebpf:
- enabled: false
-```
-
-In rare cases where probe installation on your cluster isn't possible and the kernel/probe
-isn't pre-compiled, you may need to manually prepare the kernel module or eBPF probe with
-[`driverkit`](https://github.com/falcosecurity/driverkit#against-a-kubernetes-cluster)
-and install it on each cluster node.
-
-By default, Falco is deployed with a limited set of rules. To add more rules, add
-the following to `.gitlab/managed-apps/falco/values.yaml` (you can get examples from
-[Cloud Native Security Hub](https://securityhub.dev/)):
-
-```yaml
-customRules:
- file-integrity.yaml: |-
- - rule: Detect New File
- desc: detect new file created
- condition: >
- evt.type = chmod or evt.type = fchmod
- output: >
- File below a known directory opened for writing (user=%user.name
- command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline gparent=%proc.aname[2])
- priority: ERROR
- tags: [filesystem]
- - rule: Detect New Directory
- desc: detect new directory created
- condition: >
- mkdir
- output: >
- File below a known directory opened for writing (user=%user.name
- command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline gparent=%proc.aname[2])
- priority: ERROR
- tags: [filesystem]
-```
-
-By default, Falco only outputs security events to logs as JSON objects. To set it to output to an
-[external API](https://falco.org/docs/alerts/#https-output-send-alerts-to-an-https-end-point)
-or [application](https://falco.org/docs/alerts/#program-output),
-add the following to `.gitlab/managed-apps/falco/values.yaml`:
-
-```yaml
-falco:
- programOutput:
- enabled: true
- keepAlive: false
- program: mail -s "Falco Notification" someone@example.com
-
- httpOutput:
- enabled: true
- url: http://some.url
-```
-
-You can check these logs with the following command:
-
-```shell
-kubectl -n gitlab-managed-apps logs -l app=falco
-```
-
-Support for installing the Falco managed application is provided by the
-GitLab Container Security group. If you run into unknown issues,
-[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping at
-least 2 people from the
-[Container Security group](https://about.gitlab.com/handbook/product/categories/#container-security-group).
-
### Install Vault using GitLab CI/CD
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/9982) in GitLab 12.9.
@@ -855,38 +636,6 @@ Support for the Crossplane managed application is provided by the Crossplane tea
If you run into issues,
[open a support ticket](https://github.com/crossplane/crossplane/issues/new/choose) directly.
-### Install Fluentd using GitLab CI/CD
-
-> - [Introduced](https://gitlab.com/gitlab-org/cluster-integration/cluster-applications/-/merge_requests/76) in GitLab 12.10.
-> - [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/327908) in GitLab 13.12.
-
-To install Fluentd into the `gitlab-managed-apps` namespace of your cluster using
-GitLab CI/CD, define the following configuration in `.gitlab/managed-apps/config.yaml`:
-
-```yaml
-Fluentd:
- installed: true
-```
-
-You can also review the default values set for this chart in the
-[`values.yaml`](https://github.com/helm/charts/blob/master/stable/fluentd/values.yaml) file.
-
-You can customize the installation of Fluentd by defining
-`.gitlab/managed-apps/fluentd/values.yaml` file in your cluster management
-project. Refer to the
-[configuration chart](https://github.com/helm/charts/tree/master/stable/fluentd#configuration)
-for the current development release of Fluentd for all available configuration options.
-
-The configuration chart link points to the current development release, which
-may differ from the version you have installed. To ensure compatibility, switch
-to the specific branch or tag you are using.
-
-Support for installing the Fluentd managed application is provided by the
-GitLab Container Security group. If you run into unknown issues,
-[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping at
-least 2 people from the
-[Container Security group](https://about.gitlab.com/handbook/product/categories/#container-security-group).
-
### Install Knative using GitLab CI/CD
> [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/327908) in GitLab 13.12.
@@ -939,92 +688,6 @@ by running the following command:
kubectl delete -f https://gitlab.com/gitlab-org/cluster-integration/cluster-applications/-/raw/02c8231e30ef5b6725e6ba368bc63863ceb3c07d/src/default-data/knative/istio-metrics.yaml
```
-### Install AppArmor using GitLab CI/CD
-
-> - [Introduced](https://gitlab.com/gitlab-org/cluster-integration/cluster-applications/-/merge_requests/100) in GitLab 13.1.
-> - [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/327908) in GitLab 13.12.
-
-To install AppArmor into the `gitlab-managed-apps` namespace of your cluster using
-GitLab CI/CD, define the following configuration in `.gitlab/managed-apps/config.yaml`:
-
-```yaml
-apparmor:
- installed: true
-```
-
-You can define one or more AppArmor profiles by adding them into
-`.gitlab/managed-apps/apparmor/values.yaml` as the following:
-
-```yaml
-profiles:
- profile-one: |-
- profile profile-one {
- file,
- }
-```
-
-Refer to the [AppArmor chart](https://gitlab.com/gitlab-org/charts/apparmor) for more information on this chart.
-
-#### Using AppArmor profiles in your deployments
-
-After installing AppArmor, you can use profiles by adding Pod Annotations. If you're using
-Auto DevOps, you can [customize `auto-deploy-values.yaml`](../../topics/autodevops/customize.md#customize-values-for-helm-chart)
-to annotate your pods. Although it's helpful to be aware of the
-[list of custom attributes](https://gitlab.com/gitlab-org/cluster-integration/auto-deploy-image/-/tree/master/assets/auto-deploy-app#gitlabs-auto-deploy-helm-chart),
-you're only required to set `podAnnotations` as follows:
-
-```yaml
-podAnnotations:
- container.apparmor.security.beta.kubernetes.io/auto-deploy-app: localhost/profile-one
-```
-
-The only information to be changed here is the profile name which is `profile-one`
-in this example. Refer to the
-[AppArmor tutorial](https://kubernetes.io/docs/tutorials/security/apparmor/#securing-a-pod)
-for more information on how AppArmor is integrated in Kubernetes.
-
-#### Using PodSecurityPolicy in your deployments
-
-To enable AppArmor annotations on a Pod Security Policy you must first
-load the corresponding AppArmor profile.
-
-[Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) are
-resources at the cluster level that control security-related
-properties of deployed pods. You can use such a policy to enable
-loaded AppArmor profiles and apply necessary pod restrictions across a
-cluster. You can deploy a new policy by adding the following
-to `.gitlab/managed-apps/apparmor/values.yaml`:
-
-```yaml
-securityPolicies:
- example:
- defaultProfile: profile-one
- allowedProfiles:
- - profile-one
- - profile-two
- spec:
- privileged: false
- seLinux:
- rule: RunAsAny
- supplementalGroups:
- rule: RunAsAny
- runAsUser:
- rule: RunAsAny
- fsGroup:
- rule: RunAsAny
- volumes:
- - '*'
-```
-
-This example creates a single policy named `example` with the provided specification,
-and enables [AppArmor annotations](https://kubernetes.io/docs/tutorials/security/apparmor/#podsecuritypolicy-annotations) on it.
-
-Support for installing the AppArmor managed application is provided by the
-GitLab Container Security group. If you run into unknown issues,
-[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping
-at least 2 people from the
-[Container Security group](https://about.gitlab.com/handbook/product/categories/#container-security-group).
-
## Install with one click (REMOVED)
> [Removed](https://gitlab.com/groups/gitlab-org/-/epics/4280) in GitLab 14.0.
diff --git a/doc/user/clusters/management_project_template.md b/doc/user/clusters/management_project_template.md
index 03a63f7b5fe..a5f227751bb 100644
--- a/doc/user/clusters/management_project_template.md
+++ b/doc/user/clusters/management_project_template.md
@@ -93,12 +93,8 @@ application in the template.
The [built-in supported applications](https://gitlab.com/gitlab-org/project-templates/cluster-management/-/tree/master/applications) are:
-- [Apparmor](../infrastructure/clusters/manage/management_project_applications/apparmor.md)
- [Cert-manager](../infrastructure/clusters/manage/management_project_applications/certmanager.md)
-- [Cilium](../infrastructure/clusters/manage/management_project_applications/cilium.md)
- [Elastic Stack](../infrastructure/clusters/manage/management_project_applications/elasticstack.md)
-- [Falco](../infrastructure/clusters/manage/management_project_applications/falco.md)
-- [Fluentd](../infrastructure/clusters/manage/management_project_applications/fluentd.md)
- [GitLab Runner](../infrastructure/clusters/manage/management_project_applications/runner.md)
- [Ingress](../infrastructure/clusters/manage/management_project_applications/ingress.md)
- [Prometheus](../infrastructure/clusters/manage/management_project_applications/prometheus.md)
diff --git a/doc/user/infrastructure/clusters/manage/management_project_applications/apparmor.md b/doc/user/infrastructure/clusters/manage/management_project_applications/apparmor.md
deleted file mode 100644
index ae335a180e8..00000000000
--- a/doc/user/infrastructure/clusters/manage/management_project_applications/apparmor.md
+++ /dev/null
@@ -1,30 +0,0 @@
----
-stage: Protect
-group: Container Security
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
----
-
-# Install AppArmor with a cluster management project **(FREE)**
-
-> [Introduced](https://gitlab.com/gitlab-org/project-templates/cluster-management/-/merge_requests/5) in GitLab 14.0.
-
-Assuming you already have a [Cluster management project](../../../../../user/clusters/management_project.md) created from a
-[management project template](../../../../../user/clusters/management_project_template.md), to install AppArmor you should
-uncomment this line from your `helmfile.yaml`:
-
-```yaml
- - path: applications/apparmor/helmfile.yaml
-```
-
-You can define one or more AppArmor profiles by adding them into
-`applications/apparmor/values.yaml` as the following:
-
-```yaml
-profiles:
- profile-one: |-
- profile profile-one {
- file,
- }
-```
-
-Refer to the [AppArmor chart](https://gitlab.com/gitlab-org/charts/apparmor) for more information on this chart.
diff --git a/doc/user/infrastructure/clusters/manage/management_project_applications/cilium.md b/doc/user/infrastructure/clusters/manage/management_project_applications/cilium.md
deleted file mode 100644
index 5d704a2c6df..00000000000
--- a/doc/user/infrastructure/clusters/manage/management_project_applications/cilium.md
+++ /dev/null
@@ -1,122 +0,0 @@
----
-stage: Protect
-group: Container Security
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
----
-
-# Install Cilium with a cluster management project **(FREE)**
-
-> [Introduced](https://gitlab.com/gitlab-org/project-templates/cluster-management/-/merge_requests/5) in GitLab 14.0.
-
-[Cilium](https://cilium.io/) is a networking plugin for Kubernetes that you can use to implement
-support for [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
-resources. For more information, see [Network Policies](../../../../../topics/autodevops/stages.md#network-policy).
-
-<i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
-For an overview, see the
-[Container Network Security Demo for GitLab 12.8](https://www.youtube.com/watch?v=pgUEdhdhoUI).
-
-Assuming you already have a [Cluster management project](../../../../../user/clusters/management_project.md) created from a
-[management project template](../../../../../user/clusters/management_project_template.md), to install cilium you should
-uncomment this line from your `helmfile.yaml`:
-
-```yaml
- - path: applications/cilium/helmfile.yaml
-```
-
-and update the `applications/cilium/values.yaml` to set the `clusterType`:
-
-```yaml
-# possible values are gke or eks
-clusterType: gke
-```
-
-The `clusterType` variable enables the recommended Helm variables for a corresponding cluster type.
-You can check the recommended variables for each cluster type in the official documentation:
-
-- [Google GKE](https://docs.cilium.io/en/v1.8/gettingstarted/k8s-install-gke/#deploy-cilium)
-- [AWS EKS](https://docs.cilium.io/en/v1.8/gettingstarted/k8s-install-eks/#deploy-cilium)
-
-Do not use `clusterType` for sandbox environments like [minikube](https://minikube.sigs.k8s.io/docs/).
-
-You can customize Cilium's Helm variables by defining the
-`applications/cilium/values.yaml` file in your cluster
-management project. Refer to the
-[Cilium chart](https://github.com/cilium/cilium/tree/master/install/kubernetes/cilium)
-for the available configuration options.
-
-You can check Cilium's installation status on the cluster management page:
-
-- [Project-level cluster](../../../../project/clusters/index.md): Navigate to your project's
- **Infrastructure > Kubernetes clusters** page.
-- [Group-level cluster](../../../../group/clusters/index.md): Navigate to your group's
- **Kubernetes** page.
-
-WARNING:
-Installation and removal of the Cilium requires a **manual**
-[restart](https://docs.cilium.io/en/stable/gettingstarted/k8s-install-helm/#restart-unmanaged-pods)
-of all affected pods in all namespaces to ensure that they are
-[managed](https://docs.cilium.io/en/v1.8/operations/troubleshooting/#ensure-managed-pod)
-by the correct networking plugin. Whenever Hubble is enabled, its related pod might require a
-restart depending on whether it started prior to Cilium. For more information, see
-[Failed Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#failed-deployment)
-in the Kubernetes docs.
-
-NOTE:
-Major upgrades might require additional setup steps. For more information, see
-the official [upgrade guide](https://docs.cilium.io/en/v1.8/operations/upgrade/).
-
-By default, Cilium's
-[audit mode](https://docs.cilium.io/en/v1.8/gettingstarted/policy-creation/#enable-policy-audit-mode)
-is enabled. In audit mode, Cilium doesn't drop disallowed packets. You
-can use `policy-verdict` log to observe policy-related decisions. You
-can disable audit mode by adding the following to
-`applications/cilium/values.yaml`:
-
-```yaml
-config:
- policyAuditMode: false
-
-agent:
- monitor:
- eventTypes: ["drop"]
-```
-
-The Cilium monitor log for traffic is logged out by the
-`cilium-monitor` sidecar container. You can check these logs with the following command:
-
-```shell
-kubectl -n gitlab-managed-apps logs -l k8s-app=cilium -c cilium-monitor
-```
-
-You can disable the monitor log in `.gitlab/managed-apps/cilium/values.yaml`:
-
-```yaml
-agent:
- monitor:
- enabled: false
-```
-
-The [Hubble](https://github.com/cilium/hubble) monitoring daemon is enabled by default
-and it's set to collect per namespace flow metrics. This metrics are accessible on the
-[Threat Monitoring](../../../../application_security/threat_monitoring/index.md)
-dashboard. You can disable Hubble by adding the following to
-`applications/cilium/values.yaml`:
-
-```yaml
-global:
- hubble:
- enabled: false
-```
-
-You can also adjust Helm values for Hubble by using
-`applications/cilium/values.yaml`:
-
-```yaml
-global:
- hubble:
- enabled: true
- metrics:
- enabled:
- - 'flow:sourceContext=namespace;destinationContext=namespace'
-```
diff --git a/doc/user/infrastructure/clusters/manage/management_project_applications/falco.md b/doc/user/infrastructure/clusters/manage/management_project_applications/falco.md
deleted file mode 100644
index 50401e9a391..00000000000
--- a/doc/user/infrastructure/clusters/manage/management_project_applications/falco.md
+++ /dev/null
@@ -1,95 +0,0 @@
----
-stage: Protect
-group: Container Security
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
----
-
-# Install Falco with a cluster management project **(FREE)**
-
-> [Introduced](https://gitlab.com/gitlab-org/project-templates/cluster-management/-/merge_requests/5) in GitLab 14.0.
-
-GitLab Container Host Security Monitoring uses [Falco](https://falco.org/)
-as a runtime security tool that listens to the Linux kernel using eBPF. Falco parses system calls
-and asserts the stream against a configurable rules engine in real-time. For more information, see
-[Falco's Documentation](https://falco.org/docs/).
-
-Assuming you already have a [Cluster management project](../../../../../user/clusters/management_project.md) created from a
-[management project template](../../../../../user/clusters/management_project_template.md), to install Falco you should
-uncomment this line from your `helmfile.yaml`:
-
-```yaml
- - path: applications/falco/helmfile.yaml
-```
-
-You can customize Falco's Helm variables by defining the
-`applications/falco/values.yaml` file in your cluster
-management project. Refer to the
-[Falco chart](https://github.com/falcosecurity/charts/tree/master/falco)
-for the available configuration options.
-
-WARNING:
-By default eBPF support is enabled and Falco uses an
-[eBPF probe](https://falco.org/docs/event-sources/drivers/#using-the-ebpf-probe)
-to pass system calls to user space. If your cluster doesn't support this, you can
-configure it to use Falco kernel module instead by adding the following to
-`applications/falco/values.yaml`:
-
-```yaml
-ebpf:
- enabled: false
-```
-
-In rare cases where probe installation on your cluster isn't possible and the kernel/probe
-isn't pre-compiled, you may need to manually prepare the kernel module or eBPF probe with
-[`driverkit`](https://github.com/falcosecurity/driverkit#against-a-kubernetes-cluster)
-and install it on each cluster node.
-
-By default, Falco is deployed with a limited set of rules. To add more rules, add
-the following to `applications/falco/values.yaml` (you can get examples from
-[Cloud Native Security Hub](https://securityhub.dev/)):
-
-```yaml
-customRules:
- file-integrity.yaml: |-
- - rule: Detect New File
- desc: detect new file created
- condition: >
- evt.type = chmod or evt.type = fchmod
- output: >
- File below a known directory opened for writing (user=%user.name
- command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline gparent=%proc.aname[2])
- priority: ERROR
- tags: [filesystem]
- - rule: Detect New Directory
- desc: detect new directory created
- condition: >
- mkdir
- output: >
- File below a known directory opened for writing (user=%user.name
- command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline gparent=%proc.aname[2])
- priority: ERROR
- tags: [filesystem]
-```
-
-By default, Falco only outputs security events to logs as JSON objects. To set it to output to an
-[external API](https://falco.org/docs/alerts/#https-output-send-alerts-to-an-https-end-point)
-or [application](https://falco.org/docs/alerts/#program-output),
-add the following to `applications/falco/values.yaml`:
-
-```yaml
-falco:
- programOutput:
- enabled: true
- keepAlive: false
- program: mail -s "Falco Notification" someone@example.com
-
- httpOutput:
- enabled: true
- url: http://some.url
-```
-
-You can check these logs with the following command:
-
-```shell
-kubectl -n gitlab-managed-apps logs -l app=falco
-```
diff --git a/doc/user/infrastructure/clusters/manage/management_project_applications/fluentd.md b/doc/user/infrastructure/clusters/manage/management_project_applications/fluentd.md
deleted file mode 100644
index ea3a3503f9b..00000000000
--- a/doc/user/infrastructure/clusters/manage/management_project_applications/fluentd.md
+++ /dev/null
@@ -1,30 +0,0 @@
----
-stage: Protect
-group: Container Security
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
----
-
-# Install Fluentd with a cluster management project **(FREE)**
-
-> [Introduced](https://gitlab.com/gitlab-org/project-templates/cluster-management/-/merge_requests/5) in GitLab 14.0.
-
-Assuming you already have a [Cluster management project](../../../../../user/clusters/management_project.md) created from a
-[management project template](../../../../../user/clusters/management_project_template.md), to install Fluentd you should
-uncomment this line from your `helmfile.yaml`:
-
-```yaml
- - path: applications/fluentd/helmfile.yaml
-```
-
-You can also review the default values set for this chart in the
-[`values.yaml`](https://github.com/helm/charts/blob/master/stable/fluentd/values.yaml) file.
-
-You can customize the installation of Fluentd by defining
-`applications/fluentd/values.yaml` file in your cluster management
-project. Refer to the
-[configuration chart](https://github.com/helm/charts/tree/master/stable/fluentd#configuration)
-for the current development release of Fluentd for all available configuration options.
-
-The configuration chart link points to the current development release, which
-may differ from the version you have installed. To ensure compatibility, switch
-to the specific branch or tag you are using.
diff --git a/doc/user/infrastructure/iac/img/terraform_list_view_v13_8.png b/doc/user/infrastructure/iac/img/terraform_list_view_v13_8.png
deleted file mode 100644
index 6eb85285e81..00000000000
--- a/doc/user/infrastructure/iac/img/terraform_list_view_v13_8.png
+++ /dev/null
Binary files differ
diff --git a/doc/user/infrastructure/iac/terraform_state.md b/doc/user/infrastructure/iac/terraform_state.md
index 7277a9c714a..f56fe92ec01 100644
--- a/doc/user/infrastructure/iac/terraform_state.md
+++ b/doc/user/infrastructure/iac/terraform_state.md
@@ -8,58 +8,43 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/2673) in GitLab 13.0.
-[Terraform remote backends](https://www.terraform.io/language/settings/backends)
-enable you to store the state file in a remote, shared store. GitLab uses the
-[Terraform HTTP backend](https://www.terraform.io/language/settings/backends/http)
-to securely store the state files in local storage (the default) or
-[the remote store of your choice](../../../administration/terraform_state.md).
+Terraform uses state files to store details about your infrastructure configuration.
+With Terraform remote [backends](https://www.terraform.io/language/settings/backends),
+you can store the state file in a remote and shared store.
-WARNING:
-Using local storage (the default) on clustered deployments of GitLab will result in
-a split state across nodes, making subsequent executions of Terraform inconsistent.
-You are highly advised to use a remote storage resource in that case.
-
-The GitLab-managed Terraform state backend can store your Terraform state easily and
-securely, and spares you from setting up additional remote resources like
-Amazon S3 or Google Cloud Storage. Its features include:
-
-- Versioning of Terraform state files.
-- Supporting encryption of the state file both in transit and at rest.
-- Locking and unlocking state.
-- Remote Terraform plan and apply execution.
-
-A GitLab **administrator** must [set up the Terraform state storage configuration](../../../administration/terraform_state.md)
-before using this feature.
+GitLab provides a [Terraform HTTP backend](https://www.terraform.io/language/settings/backends/http)
+to securely store your state files with minimal configuration.
-## Permissions for using Terraform
+In GitLab, you can:
-In GitLab version 13.1, at least the Maintainer role was required to use a
-GitLab managed Terraform state backend.
+- Version your Terraform state files.
+- Encrypt the state file both in transit and at rest.
+- Lock and unlock states.
+- Remotely execute `terraform plan` and `terraform apply` commands.
-In GitLab versions 13.2 and later, at least:
+For self-managed instances, before you can use GitLab for your Terraform state files,
+an administrator must [set up Terraform state storage](../../../administration/terraform_state.md).
-- The Maintainer role is required to lock, unlock, and write to the state (using `terraform apply`).
-- The Developer role is required to read the state (using `terraform plan -lock=false`).
+## Initialize a Terraform state as a backend by using GitLab CI/CD
-## Set up GitLab-managed Terraform state
+After you execute the `terraform init` command, you can use GitLab CI/CD
+to run `terraform` commands.
-To get started with a GitLab-managed Terraform state, there are two different options:
+Prerequisites:
-- [Use a local machine](#get-started-using-local-development).
-- [Use GitLab CI](#get-started-using-gitlab-ci).
+- To lock, unlock, and write to the state by using `terraform apply`, you must have at least the Maintainer role.
+- To read the state by using `terraform plan -lock=false`, you must have at least the Developer role.
-Terraform States can be found by navigating to a Project's
-**{cloud-gear}** **Infrastructure > Terraform** page.
-
-### Get started using local development
+WARNING:
+Like any other job artifact, Terraform plan data is viewable by anyone with the Guest role on the repository.
+Neither Terraform nor GitLab encrypts the plan file by default. If your Terraform plan
+includes sensitive data, like passwords, access tokens, or certificates, you should
+encrypt plan output or modify the project visibility settings.
-If you plan to only run `terraform plan` and `terraform apply` commands from your
-local machine, this is a simple way to get started:
+To configure GitLab CI/CD as a backend:
-1. Create your project on your GitLab instance.
-1. Navigate to **Settings > General** and note your **Project name**
- and **Project ID**.
-1. Define the Terraform backend in your Terraform project to be:
+1. In your Terraform project, in a `.tf` file like `backend.tf`,
+ define the [HTTP backend](https://www.terraform.io/docs/language/settings/backends/http.html):
```hcl
terraform {
@@ -68,172 +53,51 @@ local machine, this is a simple way to get started:
}
```
-1. Create a [Personal Access Token](../../profile/personal_access_tokens.md) with
- the `api` scope.
-
-1. On your local machine, run `terraform init`, passing in the following options,
- replacing `<YOUR-STATE-NAME>`, `<YOUR-PROJECT-ID>`, `<YOUR-USERNAME>` and
- `<YOUR-ACCESS-TOKEN>` with the relevant values. This command initializes your
- Terraform state, and stores that state in your GitLab project. This example
- uses `gitlab.com`:
-
- ```shell
- terraform init \
- -backend-config="address=https://gitlab.com/api/v4/projects/<YOUR-PROJECT-ID>/terraform/state/<YOUR-STATE-NAME>" \
- -backend-config="lock_address=https://gitlab.com/api/v4/projects/<YOUR-PROJECT-ID>/terraform/state/<YOUR-STATE-NAME>/lock" \
- -backend-config="unlock_address=https://gitlab.com/api/v4/projects/<YOUR-PROJECT-ID>/terraform/state/<YOUR-STATE-NAME>/lock" \
- -backend-config="username=<YOUR-USERNAME>" \
- -backend-config="password=<YOUR-ACCESS-TOKEN>" \
- -backend-config="lock_method=POST" \
- -backend-config="unlock_method=DELETE" \
- -backend-config="retry_wait_min=5"
- ```
-
- WARNING:
- The name of your state can contain only uppercase and lowercase letters, decimal digits,
- hyphens, and underscores.
-
-If you already have a GitLab-managed Terraform state, you can use the `terraform init` command
-with the pre-populated parameters values:
-
-1. On the top bar, select **Menu > Projects** and find your project.
-1. On the left sidebar, select **Infrastructure > Terraform**.
-1. Next to the environment you want to use, select the [Actions menu](#managing-state-files)
- **{ellipsis_v}** and select **Copy Terraform init command**.
-
-You can now run `terraform plan` and `terraform apply` as you normally would.
-
-### Get started using GitLab CI
-
-If you don't want to start with local development, you can also use GitLab CI to
-run your `terraform plan` and `terraform apply` commands.
-
-Next, [configure the backend](#configure-the-backend).
-
-#### Configure the backend
+1. In the root directory of your project repository, create a `.gitlab-ci.yml` file. Use
+ [this file](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Terraform.gitlab-ci.yml)
+ to populate it.
+
+1. Push your project to GitLab. This action triggers a pipeline, which
+ runs the `gitlab-terraform init`, `gitlab-terraform validate`, and
+ `gitlab-terraform plan` commands.
+1. Trigger the manual `terraform apply` job from the previous pipeline to provision the defined infrastructure.
-After executing the `terraform init` command, you must configure the Terraform backend
-and the CI YAML file:
+The output from the above `terraform` commands should be viewable in the job logs.
-1. In your Terraform project, define the [HTTP backend](https://www.terraform.io/docs/language/settings/backends/http.html)
- by adding the following code block in a `.tf` file (such as `backend.tf`) to
- define the remote backend:
+## Access the state from your local machine
- ```hcl
- terraform {
- backend "http" {
- }
- }
- ```
+You can access the GitLab-managed Terraform state from your local machine.
-1. In the root directory of your project repository, configure a
- `.gitlab-ci.yml` file. This example uses a pre-built image which includes a
- `gitlab-terraform` helper. For supported Terraform versions, see the [GitLab
- Terraform Images project](https://gitlab.com/gitlab-org/terraform-images).
+WARNING:
+On clustered deployments of GitLab, you should not use local storage.
+A split state can occur across nodes, making subsequent Terraform executions
+inconsistent. Instead, use a remote storage resource.
- ```yaml
- image: registry.gitlab.com/gitlab-org/terraform-images/stable:latest
- ```
+1. Ensure the Terraform state has been
+ [initialized for CI/CD](#initialize-a-terraform-state-as-a-backend-by-using-gitlab-cicd).
+1. Copy a pre-populated Terraform `init` command:
-1. In the `.gitlab-ci.yml` file, define some CI/CD variables to ease
- development. In this example, `TF_ROOT` is the directory where the Terraform
- commands must be executed, `TF_ADDRESS` is the URL to the state on the GitLab
- instance where this pipeline runs, and the final path segment in `TF_ADDRESS`
- is the name of the Terraform state. Projects may have multiple states, and
- this name is arbitrary, so in this example we set it to `example-production`
- which corresponds with the directory we're using as our `TF_ROOT`, and we
- ensure that the `.terraform` directory is cached between jobs in the pipeline
- using a cache key based on the state name (`example-production`):
-
- ```yaml
- variables:
- TF_ROOT: ${CI_PROJECT_DIR}/environments/example/production
- TF_ADDRESS: ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/example-production
-
- cache:
- key: example-production
- paths:
- - ${TF_ROOT}/.terraform
- ```
+ 1. On the top bar, select **Menu > Projects** and find your project.
+ 1. On the left sidebar, select **Infrastructure > Terraform**.
+ 1. Next to the environment you want to use, select **Actions**
+ (**{ellipsis_v}**) and select **Copy Terraform init command**.
-1. In a `before_script`, change to your `TF_ROOT`:
-
- ```yaml
- before_script:
- - cd ${TF_ROOT}
-
- stages:
- - prepare
- - validate
- - build
- - deploy
-
- init:
- stage: prepare
- script:
- - gitlab-terraform init
-
- validate:
- stage: validate
- script:
- - gitlab-terraform validate
-
- plan:
- stage: build
- script:
- - gitlab-terraform plan
- - gitlab-terraform plan-json
- artifacts:
- name: plan
- paths:
- - ${TF_ROOT}/plan.cache
- reports:
- terraform: ${TF_ROOT}/plan.json
-
- apply:
- stage: deploy
- environment:
- name: production
- script:
- - gitlab-terraform apply
- dependencies:
- - plan
- when: manual
- rules:
- - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
- ```
-
-1. Push your project to GitLab, which triggers a CI job pipeline. This pipeline
- runs the `gitlab-terraform init`, `gitlab-terraform validate`, and
- `gitlab-terraform plan` commands.
+1. Open a terminal and run this command on your local machine.
-The output from the above `terraform` commands should be viewable in the job logs.
+## Migrate to a GitLab-managed Terraform state
-WARNING:
-Like any other job artifact, Terraform plan data is viewable by anyone with the Guest role on the repository.
-Neither Terraform nor GitLab encrypts the plan file by default. If your Terraform plan
-includes sensitive data such as passwords, access tokens, or certificates, GitLab strongly
-recommends encrypting plan output or modifying the project visibility settings.
+Terraform supports copying the state when the backend changes or is
+reconfigured. Use these actions to migrate from another backend to
+GitLab-managed Terraform state.
-### Example project
+You should use a local terminal to run the commands needed for migrating to GitLab-managed Terraform state.
-See [this reference project](https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-aws) using GitLab and Terraform to deploy a basic AWS EC2 in a custom VPC.
+The following example demonstrates how to change the state name. The same workflow is needed to migrate to GitLab-managed Terraform state from a different state storage backend.
-## Using a GitLab-managed Terraform state backend as a remote data source
+## Use your GitLab backend as a remote data source
-You can use a GitLab-managed Terraform state as a
+You can use a GitLab-managed Terraform state backend as a
[Terraform data source](https://www.terraform.io/language/state/remote-state-data).
-To use your existing Terraform state backend as a data source, provide the following details
-as [Terraform input variables](https://www.terraform.io/language/values/variables):
-
-- **address**: The URL of the remote state backend you want to use as a data source.
- For example, `https://gitlab.com/api/v4/projects/<TARGET-PROJECT-ID>/terraform/state/<TARGET-STATE-NAME>`.
-- **username**: The username to authenticate with the data source. If you are using a [Personal Access Token](../../profile/personal_access_tokens.md) for
- authentication, this is your GitLab username. If you are using GitLab CI, this is `'gitlab-ci-token'`.
-- **password**: The password to authenticate with the data source. If you are using a Personal Access Token for
- authentication, this is the token value. If you are using GitLab CI, it is the contents of the `${CI_JOB_TOKEN}` CI/CD variable.
-
-An example setup is shown below:
1. Create a file named `example.auto.tfvars` with the following contents:
@@ -243,7 +107,7 @@ An example setup is shown below:
example_access_token=<GitLab Personal Access Token>
```
-1. Define the data source by adding the following code block in a `.tf` file (such as `data.tf`):
+1. In a `.tf` file, define the data source by using [Terraform input variables](https://www.terraform.io/language/values/variables):
```hcl
data "terraform_remote_state" "example" {
@@ -257,21 +121,20 @@ An example setup is shown below:
}
```
+ - **address**: The URL of the remote state backend you want to use as a data source.
+ For example, `https://gitlab.com/api/v4/projects/<TARGET-PROJECT-ID>/terraform/state/<TARGET-STATE-NAME>`.
+ - **username**: The username to authenticate with the data source. If you are using
+ a [Personal Access Token](../../profile/personal_access_tokens.md) for
+ authentication, this value is your GitLab username. If you are using GitLab CI/CD, this value is `'gitlab-ci-token'`.
+ - **password**: The password to authenticate with the data source. If you are using a Personal Access Token for
+ authentication, this value is the token value. If you are using GitLab CI/CD, this value is the contents of the `${CI_JOB_TOKEN}` CI/CD variable.
+
Outputs from the data source can now be referenced in your Terraform resources
using `data.terraform_remote_state.example.outputs.<OUTPUT-NAME>`.
-You need at least the Developer role in the target project
-to read the Terraform state.
+To read the Terraform state in the target project, you need at least the Developer role.
-## Migrating to GitLab-managed Terraform state
-
-Terraform supports copying the state when the backend is changed or
-reconfigured. This can be useful if you need to migrate from another backend to
-GitLab-managed Terraform state. Using a local terminal is recommended to run the commands needed for migrating to GitLab-managed Terraform state.
-
-The following example demonstrates how to change the state name, the same workflow is needed to migrate to GitLab-managed Terraform state from a different state storage backend.
-
-### Setting up the initial backend
+### Set up the initial backend
```shell
PROJECT_ID="<gitlab-project-id>"
@@ -309,7 +172,7 @@ re-run this command to reinitialize your working directory. If you forget, other
commands will detect it and remind you to do so if necessary.
```
-### Changing the backend
+### Change the backend
Now that `terraform init` has created a `.terraform/` directory that knows where
the old state is, you can tell it about the new location:
@@ -366,94 +229,54 @@ commands will detect it and remind you to do so if necessary.
If you type `yes`, it copies your state from the old location to the new
location. You can then go back to running it in GitLab CI/CD.
-## Managing state files
+## Manage Terraform state files
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/273592) in GitLab 13.8.
-Users with at least the Developer role can view the
-state files attached to a project at **Infrastructure > Terraform**. Users with the
-Maintainer role can perform commands on the state files. The user interface
-contains these fields:
-
-![Terraform state list](img/terraform_list_view_v13_8.png)
+To view Terraform state files:
-- **Name**: The name of the environment, with a locked (**{lock}**) icon if the
- state file is locked.
-- **Pipeline**: A link to the most recent pipeline and its status.
-- **Details**: Information about when the state file was created or changed.
-- **Actions**: Actions you can take on the state file, including copying the `terraform init` command,
- downloading, locking, unlocking, or [removing](#remove-a-state-file) the state file and versions.
+1. On the top bar, select **Menu > Projects** and find your project.
+1. On the left sidebar, select **Infrastructure > Terraform**.
-NOTE:
-Additional improvements to the
-[graphical interface for managing state files](https://gitlab.com/groups/gitlab-org/-/epics/4563)
-are planned.
+[An epic exists](https://gitlab.com/groups/gitlab-org/-/epics/4563) to track improvements to this UI.
-## Manage individual Terraform state versions
+### Manage individual Terraform state versions
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/207347) in GitLab 13.4.
Individual state versions can be managed using the GitLab REST API.
-Users with the [Developer role](../../permissions.md) can retrieve state versions using their serial number. To retrieve a version:
+If you have at least the Developer role, you can retrieve state versions by using their serial number:
```shell
curl --header "Private-Token: <your_access_token>" "https://gitlab.example.com/api/v4/projects/<your_project_id>/terraform/state/<your_state_name>/versions/<version-serial>"
```
-Users with the [Maintainer role](../../permissions.md) can remove state versions using their serial number. To remove a version:
+If you have at least the Maintainer role, you can remove state versions by using their serial number:
```shell
curl --header "Private-Token: <your_access_token>" --request DELETE "https://gitlab.example.com/api/v4/projects/<your_project_id>/terraform/state/<your_state_name>/versions/<version-serial>"
```
-## Remove a state file
-
-Users with at least the Maintainer role can use the
-following options to remove a state file:
+### Remove a state file
-- **GitLab UI**: Go to **Infrastructure > Terraform**. In the **Actions** column,
- click the vertical ellipsis (**{ellipsis_v}**) button and select
- **Remove state file and versions**.
-- **GitLab REST API**: You can remove a state file by making a request to the
- REST API. For example:
+If you have at least the Maintainer role, you can remove a state file.
- ```shell
- curl --header "Private-Token: <your_access_token>" --request DELETE "https://gitlab.example.com/api/v4/projects/<your_project_id>/terraform/state/<your_state_name>"
- ```
-
-- [GitLab GraphQL API](#remove-a-state-file-with-the-gitlab-graphql-api).
-
-### Remove a state file with the GitLab GraphQL API
-
-You can remove a state file by making a GraphQL API request. For example:
+1. On the left sidebar, select **Infrastructure > Terraform**.
+1. In the **Actions** column, select **Actions** (**{ellipsis_v}**) and then **Remove state file and versions**.
-```shell
-mutation deleteState {
- terraformStateDelete(input: { id: "<global_id_for_the_state>" }) {
- errors
- }
-}
-```
+### Remove a state file by using the API
-You can obtain the `<global_id_for_the_state>` by querying the list of states:
+You can remove a state file by making a request to the REST API. For example:
```shell
-query ProjectTerraformStates {
- project(fullPath: "<your_project_path>") {
- terraformStates {
- nodes {
- id
- name
- }
- }
- }
-}
+curl --header "Private-Token: <your_access_token>" --request DELETE "https://gitlab.example.com/api/v4/projects/<your_project_id>/terraform/state/<your_state_name>"
```
-For those new to the GitLab GraphQL API, read
-[Getting started with GitLab GraphQL API](../../../api/graphql/getting_started.md).
+You can also use [the GraphQL API](../../../api/graphql/reference/index.md#mutationterraformstatedelete).
## Related topics
- [Troubleshooting GitLab-managed Terraform state](troubleshooting.md).
+- To use GitLab and Terraform to deploy an AWS EC2 instance in a custom VPC,
+ see [this sample project](https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-aws).
diff --git a/doc/user/infrastructure/iac/troubleshooting.md b/doc/user/infrastructure/iac/troubleshooting.md
index bc0aa39bc70..20c6974717d 100644
--- a/doc/user/infrastructure/iac/troubleshooting.md
+++ b/doc/user/infrastructure/iac/troubleshooting.md
@@ -80,7 +80,7 @@ This happens because the value of `$CI_JOB_TOKEN` is only valid for the duration
As a workaround, use [http backend configuration variables](https://www.terraform.io/docs/language/settings/backends/http.html#configuration-variables) in your CI job,
which is what happens behind the scenes when following the
-[Get started using GitLab CI](terraform_state.md#get-started-using-gitlab-ci) instructions.
+[Get started using GitLab CI](terraform_state.md#initialize-a-terraform-state-as-a-backend-by-using-gitlab-cicd) instructions.
### Error: "address": required field is not set
diff --git a/doc/user/permissions.md b/doc/user/permissions.md
index e7b119c89ef..134bc0c4206 100644
--- a/doc/user/permissions.md
+++ b/doc/user/permissions.md
@@ -67,7 +67,6 @@ The following table lists project permissions available for each role:
| [Application security](application_security/index.md):<br>Create and run [on-demand DAST scans](application_security/dast/index.md#on-demand-scans) | | | ✓ | ✓ | ✓ |
| [Application security](application_security/index.md):<br>Manage [security policy](application_security/policies/index.md) | | | ✓ | ✓ | ✓ |
| [Application security](application_security/index.md):<br>View [dependency list](application_security/dependency_list/index.md) | | | ✓ | ✓ | ✓ |
-| [Application security](application_security/index.md):<br>View [threats list](application_security/threat_monitoring/index.md#threat-monitoring) | | | ✓ | ✓ | ✓ |
| [Application security](application_security/index.md):<br>Create a [CVE ID Request](application_security/cve_id_request.md) | | | | ✓ | ✓ |
| [Application security](application_security/index.md):<br>Create or assign [security policy project](application_security/policies/index.md) | | | | | ✓ |
| [Clusters](infrastructure/clusters/index.md):<br>View [pod logs](project/clusters/kubernetes_pod_logs.md) | | | ✓ | ✓ | ✓ |
diff --git a/doc/user/profile/personal_access_tokens.md b/doc/user/profile/personal_access_tokens.md
index 8b6c05796a5..46dca48d3b8 100644
--- a/doc/user/profile/personal_access_tokens.md
+++ b/doc/user/profile/personal_access_tokens.md
@@ -23,7 +23,7 @@ Personal access tokens are:
- Required when [two-factor authentication (2FA)](account/two_factor_authentication.md) is enabled.
- Used with a GitLab username to authenticate with GitLab features that require usernames. For example,
- [GitLab managed Terraform state backend](../infrastructure/iac/terraform_state.md#using-a-gitlab-managed-terraform-state-backend-as-a-remote-data-source)
+ [GitLab-managed Terraform state backend](../infrastructure/iac/terraform_state.md#use-your-gitlab-backend-as-a-remote-data-source)
and [Docker container registry](../packages/container_registry/index.md#authenticate-with-the-container-registry),
- Similar to [project access tokens](../project/settings/project_access_tokens.md) and [group access tokens](../group/settings/group_access_tokens.md), but are attached
to a user rather than a project or group.
diff --git a/doc/user/project/clusters/protect/container_host_security/index.md b/doc/user/project/clusters/protect/container_host_security/index.md
deleted file mode 100644
index c897100f14e..00000000000
--- a/doc/user/project/clusters/protect/container_host_security/index.md
+++ /dev/null
@@ -1,66 +0,0 @@
----
-stage: Protect
-group: Container Security
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
----
-
-# Container Host Security **(FREE)**
-
-> [Deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476) in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477) in GitLab 15.0.
-
-WARNING:
-Container Host Security is in its end-of-life process. It's [deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476)
-in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477)
-in GitLab 15.0.
-
-Container Host Security in GitLab provides Intrusion Detection and Prevention capabilities that can
-monitor and (optionally) block activity inside the containers themselves. This is done by leveraging
-an integration with Falco to provide the monitoring capabilities and an integration with Pod
-Security Policies and AppArmor to provide blocking capabilities.
-
-## Overview
-
-Container Host Security can be used to monitor and block activity inside a container as well as to
-enforce security policies across the entire Kubernetes cluster. Falco profiles allow for users to
-define the activity they want to monitor for and detect. Among other things, this can include system
-log entries, process starts, file activity, and network ports opened. AppArmor is used to block any
-undesired activity via AppArmor profiles. These profiles are loaded into the cluster when
-referenced by Pod Security Policies.
-
-By default, Container Host Security is deployed into the cluster in monitor mode only, with no
-default profiles or rules running out-of-the-box. Activity monitoring and blocking begins only when
-users define profiles for these technologies.
-
-## Installation
-
-See the [installation guide](quick_start_guide.md) for the recommended steps to install the
-Container Host Security capabilities. This guide shows the recommended way of installing Container
-Host Security through the Cluster Management Project. However, it's also possible to do a manual
-installation through our Helm chart.
-
-## Features
-
-- Prevent containers from starting as root.
-- Limit the privileges and system calls available to containers.
-- Monitor system logs, process starts, files read/written/deleted, and network ports opened.
-- Optionally block processes from starting or files from being read/written/deleted.
-
-## Supported container orchestrators
-
-Kubernetes v1.14+ is the only supported container orchestrator. OpenShift and other container
-orchestrators aren't supported.
-
-## Supported Kubernetes providers
-
-The following cloud providers are supported:
-
-- Amazon EKS
-- Google GKE
-
-Although Container Host Security may function on Azure or self-managed Kubernetes instances, it isn't
-officially tested and supported on those providers.
-
-## Roadmap
-
-See the [Category Direction page](https://about.gitlab.com/direction/protect/container_host_security/)
-for more information on the product direction of Container Host Security.
diff --git a/doc/user/project/clusters/protect/container_host_security/quick_start_guide.md b/doc/user/project/clusters/protect/container_host_security/quick_start_guide.md
deleted file mode 100644
index af3128e3006..00000000000
--- a/doc/user/project/clusters/protect/container_host_security/quick_start_guide.md
+++ /dev/null
@@ -1,72 +0,0 @@
----
-stage: Protect
-group: Container Security
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
----
-
-# Getting started with Container Host Security **(FREE)**
-
-> [Deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476) in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477) in GitLab 15.0.
-
-WARNING:
-Container Host Security is in its end-of-life process. It's [deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476)
-in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477)
-in GitLab 15.0.
-
-The following steps are recommended for installing Container Host Security.
-
-## Installation steps
-
-The following steps are recommended to install and use Container Host Security through GitLab:
-
-1. [Install at least one runner and connect it to GitLab](https://docs.gitlab.com/runner/).
-1. [Create a group](../../../../group/#create-a-group).
-1. [Connect a Kubernetes cluster to the group](../../add_remove_clusters.md).
-1. [Create a cluster management project and associate it with the Kubernetes cluster](../../../../clusters/management_project.md).
-
-1. Install and configure an Ingress node:
-
- - [Install the Ingress node via CI/CD (Cluster Management Project)](../../../../clusters/applications.md#install-ingress-using-gitlab-cicd).
- - Navigate to the Kubernetes page and enter the [DNS address for the external endpoint](../../gitlab_managed_clusters.md#base-domain)
- into the **Base domain** field on the **Details** tab. Save the changes to the Kubernetes
- cluster.
-
-1. [Install and configure Falco](../../../../clusters/applications.md#install-falco-using-gitlab-cicd)
- for activity monitoring.
-1. [Install and configure AppArmor](../../../../clusters/applications.md#install-apparmor-using-gitlab-cicd)
- for activity blocking.
-1. [Configure Pod Security Policies](../../../../clusters/applications.md#using-podsecuritypolicy-in-your-deployments)
- (required to be able to load AppArmor profiles).
-
-It's possible to install and manage Falco and AppArmor in other ways, such as installing them
-manually in a Kubernetes cluster and then connecting it back to GitLab. These methods aren't
-supported or documented.
-
-## Viewing the logs
-
-Falco logs can be viewed by running the following command in your Kubernetes cluster:
-
-```shell
-kubectl -n gitlab-managed-apps logs -l app=falco
-```
-
-## Troubleshooting
-
-### Trouble connecting to the cluster
-
-Your CI/CD pipeline may occasionally fail or have trouble connecting to the cluster. Here are some
-initial troubleshooting steps that resolve the most common problems:
-
-1. [Clear the cluster cache](../../gitlab_managed_clusters.md#clearing-the-cluster-cache)
-1. If things still aren't working, a more assertive set of actions may help get things back to a
- good state:
-
- - Stop and [delete the problematic environment](../../../../../ci/environments/#delete-a-stopped-environment)
- in GitLab.
- - Delete the relevant namespace in Kubernetes by running
- `kubectl delete namespaces <insert-some-namespace-name>` in your Kubernetes cluster.
- - Rerun the application project pipeline to redeploy the application.
-
-**Related documentation links:**
-
-- [Cluster Management Project](../../../../clusters/management_project.md)
diff --git a/doc/user/project/clusters/protect/container_network_security/index.md b/doc/user/project/clusters/protect/container_network_security/index.md
deleted file mode 100644
index b294859c660..00000000000
--- a/doc/user/project/clusters/protect/container_network_security/index.md
+++ /dev/null
@@ -1,76 +0,0 @@
----
-stage: Protect
-group: Container Security
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
----
-
-# Container Network Security **(FREE)**
-
-> [Deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476) in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477) in GitLab 15.0.
-
-WARNING:
-Container Network Security is in its end-of-life process. It's [deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476)
-in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477)
-in GitLab 15.0.
-
-Container Network Security in GitLab provides basic firewall functionality by leveraging Cilium
-NetworkPolicies to filter traffic going in and out of the cluster as well as traffic between pods
-inside the cluster. Container Network Security can be used to enforce L3, L4, and L7 policies and
-can prevent an attacker with control over one pod from spreading laterally to access other pods in
-the same cluster. Both Ingress and Egress rules are supported.
-
-By default, Cilium is deployed in Detection-only mode and only logs attack attempts. GitLab provides
-a set of out-of-the-box policies as examples and to help users get started. These policies are
-disabled by default, as they must usually be customized to match application-specific needs.
-
-## Installation
-
-See the [installation guide](quick_start_guide.md) for the recommended steps to install GitLab
-Container Network Security. This guide shows the recommended way of installing Container Network
-Security through the Cluster Management Project. However, it's also possible to install Cilium
-manually through our Helm chart.
-
-## Features
-
-- GitLab managed installation of Cilium.
-- Support for L3, L4, and L7 policies.
-- Ability to export logs to a SIEM.
-- Statistics page showing volume of packets processed and dropped over time (Ultimate users only).
-- Management of NetworkPolicies through code in a project (Available for auto DevOps users only).
-- Management of CiliumNetworkPolicies through a UI policy manager (Ultimate users only).
-
-## Supported container orchestrators
-
-Kubernetes v1.14+ is the only supported container orchestrator. OpenShift and other container
-orchestrators aren't supported.
-
-## Supported Kubernetes providers
-
-The following cloud providers are supported:
-
-- Amazon EKS
-- Google GKE
-
-Although Container Network Security may function on Azure or self-managed Kubernetes instances, it
-isn't officially tested and supported on those providers.
-
-## Supported NetworkPolicies
-
-GitLab only supports the use of CiliumNetworkPolicies. Although generic Kubernetes NetworkPolicies
-or other kinds of NetworkPolicies may work, GitLab doesn't test or support them.
-
-## Managing NetworkPolicies through GitLab vs your cloud provider
-
-Some cloud providers offer integrations with Cilium or offer other ways to manage NetworkPolicies in
-Kubernetes. GitLab Container Network Security doesn't support deployments that have NetworkPolicies
-managed by an external provider. By choosing to manage NetworkPolicies through GitLab, you can take
-advantage of the following benefits:
-
-- Support for handling NetworkPolicy infrastructure as code.
-- Full revision history and audit log of all changes made.
-- Ability to revert back to a previous version at any time.
-
-## Roadmap
-
-See the [Category Direction page](https://about.gitlab.com/direction/protect/container_network_security/)
-for more information on the product direction of Container Network Security.
diff --git a/doc/user/project/clusters/protect/container_network_security/quick_start_guide.md b/doc/user/project/clusters/protect/container_network_security/quick_start_guide.md
deleted file mode 100644
index 7671ed7eb73..00000000000
--- a/doc/user/project/clusters/protect/container_network_security/quick_start_guide.md
+++ /dev/null
@@ -1,230 +0,0 @@
----
-stage: Protect
-group: Container Security
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
----
-
-# Getting started with Container Network Security **(FREE)**
-
-> [Deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476) in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477) in GitLab 15.0.
-
-WARNING:
-Container Network Security is in its end-of-life process. It's [deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476)
-in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477)
-in GitLab 15.0.
-
-The following steps are recommended for installing Container Network Security.
-
-## Installation steps
-
-The following steps are recommended to install and use Container Network Security through GitLab:
-
-1. [Install at least one runner and connect it to GitLab](https://docs.gitlab.com/runner/).
-1. [Create a group](../../../../group/#create-a-group).
-1. [Connect a Kubernetes cluster to the group](../../add_remove_clusters.md).
-1. [Create a cluster management project and associate it with the Kubernetes cluster](../../../../clusters/management_project.md).
-
-1. Install and configure an Ingress node:
-
- - [Install the Ingress node via CI/CD (Cluster Management Project)](../../../../clusters/applications.md#install-ingress-using-gitlab-cicd).
- - Navigate to the Kubernetes page and enter the [DNS address for the external endpoint](../../gitlab_managed_clusters.md#base-domain)
- into the **Base domain** field on the **Details** tab. Save the changes to the Kubernetes
- cluster.
-
-1. [Install and configure Cilium](#use-the-cluster-management-template-to-install-cilium).
-1. Be sure to restart all pods that were running before Cilium was installed by running this command
- in your cluster:
-
- `kubectl get pods --all-namespaces -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,HOSTNETWORK:.spec.hostNetwork --no-headers=true | grep '<none>' | awk '{print "-n "$1" "$2}' | xargs -L 1 -r kubectl delete pod`
-
- You can skip this step if `nodeinit.restartPods` is set to `true` on your Helm chart.
-
-It's possible to install and manage Cilium in other ways. For example, you could use the GitLab Helm
-chart to install Cilium manually in a Kubernetes cluster, and then connect it back to GitLab.
-However, such methods aren't documented or officially supported by GitLab.
-
-### Use the Cluster Management template to install Cilium
-
-[Cilium](https://cilium.io/) is a networking plug-in for Kubernetes that you can use to implement
-support for [`NetworkPolicy`](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
-resources. For more information, see [Network Policies](../../../../../topics/autodevops/stages.md#network-policy).
-
-You can use the [Cluster Management Project Template](../../../../clusters/management_project_template.md)
-to install Cilium in your Kubernetes cluster.
-
-1. In your cluster management project, go to `helmfile.yaml` and uncomment `- path: applications/cilium/helmfile.yaml`.
-1. In `applications/cilium/helmfile.yaml`, set `clusterType` to either `gke` or `eks` based on which Kubernetes provider your are using.
-
- ```yaml
- environments:
- default:
- values:
- # Set to "gke" or "eks" based on your cluster type
- - clusterType: ""
- ```
-
-1. Merge or push these changes to the default branch of your cluster management project,
-and [GitLab CI/CD](../../../../../ci/index.md) will automatically install Cilium.
-
-WARNING:
-Installation and removal of the Cilium requires a **manual**
-[restart](https://docs.cilium.io/en/stable/gettingstarted/k8s-install-helm/#restart-unmanaged-pods)
-of all affected pods in all namespaces to ensure that they are
-[managed](https://docs.cilium.io/en/stable/operations/troubleshooting/#ensure-managed-pod)
-by the correct networking plug-in. When Hubble is enabled, its related pod might require a
-restart depending on whether it started prior to Cilium. For more information, see
-[Failed Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#failed-deployment)
-in the Kubernetes docs.
-
-NOTE:
-Major upgrades might require additional setup steps. For more information, see
-the official [upgrade guide](https://docs.cilium.io/en/stable/operations/upgrade/).
-
-Support for installing the Cilium application is provided by the
-GitLab Container Security group. If you run into unknown issues,
-[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping at
-least 2 people from the
-[Container Security group](https://about.gitlab.com/handbook/product/categories/#container-security-group).
-
-### Configure the Cilium Helm chart
-
-You can customize Cilium's Helm variables by editing the `applications/cilium/values.yaml`
-file in your cluster management project. Refer to the [Cilium Helm reference](https://docs.cilium.io/en/stable/helm-reference/)
-for the available configuration options.
-
-By default, Cilium's
-[audit mode](https://docs.cilium.io/en/stable/gettingstarted/policy-creation/#enable-policy-audit-mode)
-is enabled. In audit mode, Cilium doesn't drop disallowed packets. You
-can use `policy-verdict` log to observe policy-related decisions. You
-can disable audit mode by setting `policyAuditMode: false` in
-`applications/cilium/values.yaml`.
-
-The Cilium monitor log for traffic is logged out by the
-`cilium-monitor` sidecar container. You can check these logs with the following command:
-
-```shell
-kubectl -n gitlab-managed-apps logs -l k8s-app=cilium -c cilium-monitor
-```
-
-You can disable the monitor log in `application/cilium/values.yaml`:
-
-```yaml
-monitor:
- enabled: false
-```
-
-The [Hubble](https://github.com/cilium/hubble) monitoring daemon is enabled by default
-and it's set to collect per namespace flow metrics. This metrics are accessible on the
-[Threat Monitoring](../../../../application_security/threat_monitoring/index.md)
-dashboard. You can disable Hubble by adding the following to
-`applications/cilium/values.yaml`:
-
-```yaml
-hubble:
- enabled: false
-```
-
-You can also adjust Helm values for Hubble by using
-`applications/cilium/values.yaml`:
-
-```yaml
-hubble:
- enabled: true
- metrics:
- enabled:
- - 'flow:sourceContext=namespace;destinationContext=namespace'
-```
-
-## Managing Network Policies
-
-Managing NetworkPolicies through GitLab is advantageous over managing the policies in Kubernetes
-directly. Kubernetes doesn't provide a GUI editor, a change control process, or a revision history.
-Network Policies can be managed through GitLab in one of two ways:
-
-- Management through a YAML file in each application's project (for projects using Auto DevOps). For
- more information, see the [Network Policy documentation](../../../../../topics/autodevops/stages.md#network-policy).
-- Management through the GitLab Policy management UI (for projects not using Auto DevOps). For more
- information, see the [Container Network Policy documentation](../../../../application_security/policies/index.md#container-network-policy) (Ultimate only).
-
-Each method has benefits and drawbacks:
-
-| | YAML method | UI method (Ultimate only) |
-|--|:------------|:-------------------------------|
-| **Benefits** | A change control process is possible by requiring [MR Approvals](../../../merge_requests/approvals/index.md). All changes are fully tracked and audited in the same way that Git tracks the history of any file in its repository. | The UI provides a simple rules editor for users who are less familiar with the YAML syntax of NetworkPolicies. This view is a live representation of the policies currently deployed in the Kubernetes cluster. The UI also allows for multiple network policies to be created per environment. |
-| **Drawbacks** | Only one network policy can be deployed per environment (although that policy can be as detailed as needed). Also, if changes were made in Kubernetes directly rather than through the `auto-deploy-values.yaml` file, the YAML file's contents don't represent the actual state of policies deployed in Kubernetes. | Policy changes aren't audited and a change control process isn't available. |
-
-Users are encouraged to choose one of the two methods to manage their policies. If users attempt to
-use both methods simultaneously, when the application project pipeline runs the contents of the
-NetworkPolicy in the `auto-deploy-values.yaml` file may override policies configured in the UI
-editor.
-
-## Monitoring throughput **(ULTIMATE)**
-
-To view statistics for Container Network Security, you must follow the installation steps above and
-configure GitLab integration with Prometheus. Also, if you use custom Helm values for Cilium, you
-must enable Hubble with flow metrics for each namespace by adding the following lines to
-your [Cilium values](#use-the-cluster-management-template-to-install-cilium):
-
-```yaml
-hubble:
- enabled: true
- metrics:
- enabled:
- - 'flow:sourceContext=namespace;destinationContext=namespace'
-```
-
-Additional information about the statistics page is available in the
-[documentation that describes the Threat Management UI](../../../../application_security/policies/index.md#container-network-policy).
-
-## Forwarding logs to a SIEM
-
-Cilium logs can be forwarded to a SIEM or an external logging system through syslog protocol by
-installing and configuring Fluentd. Fluentd can be installed through the GitLab
-[Cluster Management Project](../../../../clusters/applications.md#install-fluentd-using-gitlab-cicd).
-
-## Viewing the logs
-
-Cilium logs can be viewed by running the following command in your Kubernetes cluster:
-
-```shell
-kubectl -n gitlab-managed-apps logs -l k8s-app=cilium -c cilium-monitor
-```
-
-## Troubleshooting
-
-### Traffic is not being blocked as expected
-
-By default, Cilium is installed in Audit mode only, meaning that NetworkPolicies log policy
-violations but don't block any traffic. To set Cilium to Blocking mode, you must add the following
-lines to the `applications/cilium/values.yaml` file in your cluster management project:
-
-```yaml
-policyEnforcementMode: "always"
-
-monitor:
- eventTypes: ["drop", "policy-verdict"]
-```
-
-### Traffic is not being allowed as expected
-
-Keep in mind that when Cilium is set to blocking mode (rather than Audit mode), NetworkPolicies
-operate on an allow-list basis. If one or more NetworkPolicies apply to a node, then all traffic
-that doesn't match at least one Policy is blocked. To resolve, add NetworkPolicies defining the
-traffic that you want to allow in the node.
-
-### Trouble connecting to the cluster
-
-Occasionally, your CI/CD pipeline may fail or have trouble connecting to the cluster. Here are some
-initial troubleshooting steps that resolve the most common problems:
-
-1. [Clear the cluster cache](../../gitlab_managed_clusters.md#clearing-the-cluster-cache).
-1. If things still aren't working, a more assertive set of actions may help get things back into a
- good state:
-
- - Stop and [delete the problematic environment](../../../../../ci/environments/index.md#delete-a-stopped-environment) in GitLab.
- - Delete the relevant namespace in Kubernetes by running `kubectl delete namespaces <insert-some-namespace-name>` in your Kubernetes cluster.
- - Rerun the application project pipeline to redeploy the application.
-
-**Related documentation links:**
-
-- [Cluster Management Project](../../../../clusters/management_project.md)
diff --git a/doc/user/project/clusters/protect/index.md b/doc/user/project/clusters/protect/index.md
deleted file mode 100644
index 6b89f7f1557..00000000000
--- a/doc/user/project/clusters/protect/index.md
+++ /dev/null
@@ -1,35 +0,0 @@
----
-stage: Protect
-group: Container Security
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
----
-
-# Protecting your deployed applications **(FREE)**
-
-> [Deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476) in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477) in GitLab 15.0.
-
-WARNING:
-The Container Network Security and Container Host Security features are in their end-of-life
-processes. They're
-[deprecated](https://gitlab.com/groups/gitlab-org/-/epics/7476)
-in GitLab 14.8, and planned for [removal](https://gitlab.com/groups/gitlab-org/-/epics/7477)
-in GitLab 15.0.
-
-GitLab makes it straightforward to protect applications deployed in [connected Kubernetes clusters](index.md).
-These protections are available in the Kubernetes network layer and in the container itself. At
-the network layer, the Container Network Security capabilities in GitLab provide basic firewall
-functionality by leveraging Cilium NetworkPolicies to filter traffic going in and out of the cluster
-and traffic between pods inside the cluster. Inside the container, Container Host Security provides
-Intrusion Detection and Prevention capabilities that can monitor and block activity inside the
-containers themselves.
-
-## Capabilities
-
-The following capabilities are available to protect deployed applications in Kubernetes:
-
-- Container Network Security
- - [Overview](container_network_security/index.md)
- - [Installation guide](container_network_security/quick_start_guide.md)
-- Container Host Security
- - [Overview](container_host_security/index.md)
- - [Installation guide](container_host_security/quick_start_guide.md)
diff --git a/doc/user/project/settings/index.md b/doc/user/project/settings/index.md
index 31cda756a78..2dca0235a62 100644
--- a/doc/user/project/settings/index.md
+++ b/doc/user/project/settings/index.md
@@ -409,10 +409,13 @@ NOTE:
Only project owners and administrators have the [permissions](../../permissions.md#project-members-permissions)
to transfer a project.
-You can transfer an existing project into a [group](../../group/index.md).
+You can transfer an existing project to another [group](../../group/index.md),
+or you can transfer a [personal project](../working_with_projects.md#view-personal-projects) to a group.
Prerequisites:
+- A group for your project. You can [view your existing groups](../../group/index.md#view-groups)
+ to find a suitable group. If you don't have a group, [create one](../../group/index.md#create-a-group).
- You must have at least the Maintainer role in that group.
- You must be the Owner of that project.
- The group to which the project is being transferred to must allow creation of new projects.
@@ -423,15 +426,15 @@ Prerequisites:
To transfer a project:
-1. Navigate to your project's **Settings > General**.
-1. Under **Advanced**, click **Expand**.
-1. Under "Transfer project", choose the namespace you want to transfer the
- project to.
-1. Confirm the transfer by typing the project's path as instructed.
+1. On the top bar, select **Menu > Projects** and find your project.
+1. On the left sidebar, select **Settings > General**.
+1. Expand **Advanced**.
+1. Under **Transfer project**, choose the namespace to transfer the project to.
+1. Select **Transfer project**.
+1. Enter the project's name and select **Confirm**.
-Once done, you are redirected to the new project's namespace. At this point,
-read what happens with the
-[redirects from the old project to the new one](../repository/index.md#what-happens-when-a-repository-path-changes).
+You are redirected to the project's new URL. Read what happens with the
+[redirects from the old URL to the new one](../repository/index.md#what-happens-when-a-repository-path-changes).
NOTE:
GitLab administrators can use the [administration interface](../../admin_area/index.md#administering-projects)
diff --git a/doc/user/project/working_with_projects.md b/doc/user/project/working_with_projects.md
index 03530b59e9b..bfc83aa22f5 100644
--- a/doc/user/project/working_with_projects.md
+++ b/doc/user/project/working_with_projects.md
@@ -275,6 +275,18 @@ To add a star to a project:
- Number of open merge requests.
- Number of open issues.
+## View personal projects
+
+Personal projects are projects created under your personal namespace.
+
+For example, if you create an account with the username `alex`, and create a project
+called `my-project` under your username, the project is created at `https://gitlab.example.com/alex/my-project`.
+
+To view your personal projects:
+
+1. On the top bar, select **Menu > Projects > Your Projects**.
+1. Under **Your projects**, select **Personal**.
+
## Delete a project
After you delete a project, projects in personal namespaces are deleted immediately. To delay deletion of projects in a group
diff --git a/lib/api/entities/user.rb b/lib/api/entities/user.rb
index ff711b4dec2..2366d137cc2 100644
--- a/lib/api/entities/user.rb
+++ b/lib/api/entities/user.rb
@@ -18,6 +18,9 @@ module API
expose :following, if: ->(user, opts) { Ability.allowed?(opts[:current_user], :read_user_profile, user) } do |user|
user.followees.size
end
+ expose :is_followed, if: ->(user, opts) { Ability.allowed?(opts[:current_user], :read_user_profile, user) && opts[:current_user] } do |user, opts|
+ opts[:current_user].following?(user)
+ end
expose :local_time do |user|
local_time(user.timezone)
end
diff --git a/lib/gitlab/gitaly_client/repository_service.rb b/lib/gitlab/gitaly_client/repository_service.rb
index 1e199a55b5a..5adb8d946a0 100644
--- a/lib/gitlab/gitaly_client/repository_service.rb
+++ b/lib/gitlab/gitaly_client/repository_service.rb
@@ -48,7 +48,7 @@ module Gitlab
def repository_size
request = Gitaly::RepositorySizeRequest.new(repository: @gitaly_repo)
- response = GitalyClient.call(@storage, :repository_service, :repository_size, request, timeout: GitalyClient.medium_timeout)
+ response = GitalyClient.call(@storage, :repository_service, :repository_size, request, timeout: GitalyClient.long_timeout)
response.size
end
diff --git a/lib/gitlab/gon_helper.rb b/lib/gitlab/gon_helper.rb
index dd64f1c11ee..3cb7cefcc07 100644
--- a/lib/gitlab/gon_helper.rb
+++ b/lib/gitlab/gon_helper.rb
@@ -59,6 +59,7 @@ module Gitlab
push_frontend_feature_flag(:source_editor_toolbar, default_enabled: :yaml)
push_frontend_feature_flag(:gl_avatar_for_all_user_avatars, default_enabled: :yaml)
push_frontend_feature_flag(:mr_attention_requests, default_enabled: :yaml)
+ push_frontend_feature_flag(:follow_in_user_popover, current_user, default_enabled: :yaml)
end
# Exposes the state of a feature flag to the frontend code.
diff --git a/locale/gitlab.pot b/locale/gitlab.pot
index dd0d50c4153..ab11f549de3 100644
--- a/locale/gitlab.pot
+++ b/locale/gitlab.pot
@@ -4183,12 +4183,18 @@ msgstr ""
msgid "An error occurred while triggering the job."
msgstr ""
+msgid "An error occurred while trying to follow this user, please try again."
+msgstr ""
+
msgid "An error occurred while trying to generate the report. Please try again later."
msgstr ""
msgid "An error occurred while trying to run a new pipeline for this merge request."
msgstr ""
+msgid "An error occurred while trying to unfollow this user, please try again."
+msgstr ""
+
msgid "An error occurred while unsubscribing to notifications."
msgstr ""
diff --git a/spec/controllers/repositories/lfs_storage_controller_spec.rb b/spec/controllers/repositories/lfs_storage_controller_spec.rb
index 7ddc5723e2e..672e6f1e85b 100644
--- a/spec/controllers/repositories/lfs_storage_controller_spec.rb
+++ b/spec/controllers/repositories/lfs_storage_controller_spec.rb
@@ -155,7 +155,7 @@ RSpec.describe Repositories::LfsStorageController do
context 'with an invalid file' do
let(:uploaded_file) { 'test' }
- it_behaves_like 'returning response status', :unprocessable_entity
+ it_behaves_like 'returning response status', :bad_request
end
context 'when an expected error' do
@@ -179,12 +179,10 @@ RSpec.describe Repositories::LfsStorageController do
end
context 'when existing file has been deleted' do
- let(:lfs_object) { create(:lfs_object, :with_file) }
+ let(:lfs_object) { create(:lfs_object, :with_file, size: params[:size], oid: params[:oid]) }
before do
FileUtils.rm(lfs_object.file.path)
- params[:oid] = lfs_object.oid
- params[:size] = lfs_object.size
end
it 'replaces the file' do
@@ -204,10 +202,10 @@ RSpec.describe Repositories::LfsStorageController do
end
end
- it 'renders LFS forbidden' do
+ it 'renders bad request' do
subject
- expect(response).to have_gitlab_http_status(:forbidden)
+ expect(response).to have_gitlab_http_status(:bad_request)
expect(lfs_object.reload.file).not_to exist
end
end
@@ -239,8 +237,9 @@ RSpec.describe Repositories::LfsStorageController do
FileUtils.mkdir_p(upload_path)
File.write(file_path, 'test')
+ File.truncate(file_path, params[:size].to_i)
- UploadedFile.new(file_path, filename: File.basename(file_path))
+ UploadedFile.new(file_path, filename: File.basename(file_path), sha256: params[:oid])
end
end
end
diff --git a/spec/frontend/api/user_api_spec.js b/spec/frontend/api/user_api_spec.js
new file mode 100644
index 00000000000..ee7194bdf5f
--- /dev/null
+++ b/spec/frontend/api/user_api_spec.js
@@ -0,0 +1,50 @@
+import MockAdapter from 'axios-mock-adapter';
+
+import { followUser, unfollowUser } from '~/api/user_api';
+import axios from '~/lib/utils/axios_utils';
+
+describe('~/api/user_api', () => {
+ let axiosMock;
+ let originalGon;
+
+ beforeEach(() => {
+ axiosMock = new MockAdapter(axios);
+
+ originalGon = window.gon;
+ window.gon = { api_version: 'v4' };
+ });
+
+ afterEach(() => {
+ axiosMock.restore();
+ axiosMock.resetHistory();
+ window.gon = originalGon;
+ });
+
+ describe('followUser', () => {
+ it('calls correct URL and returns expected response', async () => {
+ const expectedUrl = '/api/v4/users/1/follow';
+ const expectedResponse = { message: 'Success' };
+
+ axiosMock.onPost(expectedUrl).replyOnce(200, expectedResponse);
+
+ await expect(followUser(1)).resolves.toEqual(
+ expect.objectContaining({ data: expectedResponse }),
+ );
+ expect(axiosMock.history.post[0].url).toBe(expectedUrl);
+ });
+ });
+
+ describe('unfollowUser', () => {
+ it('calls correct URL and returns expected response', async () => {
+ const expectedUrl = '/api/v4/users/1/unfollow';
+ const expectedResponse = { message: 'Success' };
+
+ axiosMock.onPost(expectedUrl).replyOnce(200, expectedResponse);
+
+ await expect(unfollowUser(1)).resolves.toEqual(
+ expect.objectContaining({ data: expectedResponse }),
+ );
+ expect(axiosMock.history.post[0].url).toBe(expectedUrl);
+ });
+ });
+});
diff --git a/spec/frontend/boards/stores/getters_spec.js b/spec/frontend/boards/stores/getters_spec.js
index b30968c45d7..304f2aad98e 100644
--- a/spec/frontend/boards/stores/getters_spec.js
+++ b/spec/frontend/boards/stores/getters_spec.js
@@ -215,4 +215,33 @@ describe('Boards - Getters', () => {
expect(getters.isEpicBoard()).toBe(false);
});
});
+
+ describe('hasScope', () => {
+ const boardConfig = {
+ labels: [],
+ assigneeId: null,
+ iterationCadenceId: null,
+ iterationId: null,
+ milestoneId: null,
+ weight: null,
+ };
+
+ it('returns false when boardConfig is empty', () => {
+ const state = { boardConfig };
+
+ expect(getters.hasScope(state)).toBe(false);
+ });
+
+ it('returns true when boardScope has a label', () => {
+ const state = { boardConfig: { ...boardConfig, labels: ['foo'] } };
+
+ expect(getters.hasScope(state)).toBe(true);
+ });
+
+ it('returns true when boardConfig has a value other than null', () => {
+ const state = { boardConfig: { ...boardConfig, assigneeId: 3 } };
+
+ expect(getters.hasScope(state)).toBe(true);
+ });
+ });
});
diff --git a/spec/frontend/lib/utils/users_cache_spec.js b/spec/frontend/lib/utils/users_cache_spec.js
index 30bdddd8e73..d35ba20f570 100644
--- a/spec/frontend/lib/utils/users_cache_spec.js
+++ b/spec/frontend/lib/utils/users_cache_spec.js
@@ -228,4 +228,29 @@ describe('UsersCache', () => {
expect(userStatus).toBe(dummyUserStatus);
});
});
+
+ describe('updateById', () => {
+ describe('when the user is not cached', () => {
+ it('does nothing and returns undefined', () => {
+ expect(UsersCache.updateById(dummyUserId, { name: 'root' })).toBe(undefined);
+ expect(UsersCache.internalStorage).toStrictEqual({});
+ });
+ });
+
+ describe('when the user is cached', () => {
+ const updatedName = 'has two farms';
+ beforeEach(() => {
+ UsersCache.internalStorage[dummyUserId] = dummyUser;
+ });
+
+ it('updates the user only with the new data', async () => {
+ UsersCache.updateById(dummyUserId, { name: updatedName });
+
+ expect(await UsersCache.retrieveById(dummyUserId)).toStrictEqual({
+ username: dummyUser.username,
+ name: updatedName,
+ });
+ });
+ });
+ });
});
diff --git a/spec/frontend/runner/components/registration/registration_dropdown_spec.js b/spec/frontend/runner/components/registration/registration_dropdown_spec.js
index 5cd93df9967..81c2788f084 100644
--- a/spec/frontend/runner/components/registration/registration_dropdown_spec.js
+++ b/spec/frontend/runner/components/registration/registration_dropdown_spec.js
@@ -35,6 +35,16 @@ describe('RegistrationDropdown', () => {
const findRegistrationTokenInput = () => wrapper.findByTestId('token-value').find('input');
const findTokenResetDropdownItem = () =>
wrapper.findComponent(RegistrationTokenResetDropdownItem);
+ const findModalContent = () =>
+ createWrapper(document.body)
+ .find('[data-testid="runner-instructions-modal"]')
+ .text()
+ .replace(/[\n\t\s]+/g, ' ');
+
+ const openModal = async () => {
+ await findRegistrationInstructionsDropdownItem().trigger('click');
+ await waitForPromises();
+ };
const createComponent = ({ props = {}, ...options } = {}, mountFn = shallowMount) => {
wrapper = extendedWrapper(
@@ -49,6 +59,25 @@ describe('RegistrationDropdown', () => {
);
};
+ const createComponentWithModal = () => {
+ Vue.use(VueApollo);
+
+ const requestHandlers = [
+ [getRunnerPlatformsQuery, jest.fn().mockResolvedValue(mockGraphqlRunnerPlatforms)],
+ [getRunnerSetupInstructionsQuery, jest.fn().mockResolvedValue(mockGraphqlInstructions)],
+ ];
+
+ createComponent(
+ {
+ // Mock load modal contents from API
+ apolloProvider: createMockApollo(requestHandlers),
+ // Use `attachTo` to find the modal
+ attachTo: document.body,
+ },
+ mount,
+ );
+ };
+
it.each`
type | text
${INSTANCE_TYPE} | ${'Register an instance runner'}
@@ -76,29 +105,10 @@ describe('RegistrationDropdown', () => {
});
describe('When the dropdown item is clicked', () => {
- Vue.use(VueApollo);
-
- const requestHandlers = [
- [getRunnerPlatformsQuery, jest.fn().mockResolvedValue(mockGraphqlRunnerPlatforms)],
- [getRunnerSetupInstructionsQuery, jest.fn().mockResolvedValue(mockGraphqlInstructions)],
- ];
-
- const findModalInBody = () =>
- createWrapper(document.body).find('[data-testid="runner-instructions-modal"]');
-
beforeEach(async () => {
- createComponent(
- {
- // Mock load modal contents from API
- apolloProvider: createMockApollo(requestHandlers),
- // Use `attachTo` to find the modal
- attachTo: document.body,
- },
- mount,
- );
-
- await findRegistrationInstructionsDropdownItem().trigger('click');
- await waitForPromises();
+ createComponentWithModal({}, mount);
+
+ await openModal();
});
afterEach(() => {
@@ -106,9 +116,7 @@ describe('RegistrationDropdown', () => {
});
it('opens the modal with contents', () => {
- const modalText = findModalInBody()
- .text()
- .replace(/[\n\t\s]+/g, ' ');
+ const modalText = findModalContent();
expect(modalText).toContain('Install a runner');
@@ -153,15 +161,34 @@ describe('RegistrationDropdown', () => {
});
});
- it('Updates the token when it gets reset', async () => {
+ describe('When token is reset', () => {
const newToken = 'mock1';
- createComponent({}, mount);
- expect(findRegistrationTokenInput().props('value')).not.toBe(newToken);
+ const resetToken = async () => {
+ findTokenResetDropdownItem().vm.$emit('tokenReset', newToken);
+ await nextTick();
+ };
+
+ it('Updates token in input', async () => {
+ createComponent({}, mount);
+
+ expect(findRegistrationTokenInput().props('value')).not.toBe(newToken);
+
+ await resetToken();
+
+ expect(findRegistrationToken().props('value')).toBe(newToken);
+ });
- findTokenResetDropdownItem().vm.$emit('tokenReset', newToken);
- await nextTick();
+ it('Updates token in modal', async () => {
+ createComponentWithModal({}, mount);
- expect(findRegistrationToken().props('value')).toBe(newToken);
+ await openModal();
+
+ expect(findModalContent()).toContain(mockToken);
+
+ await resetToken();
+
+ expect(findModalContent()).toContain(newToken);
+ });
});
});
diff --git a/spec/frontend/user_popovers_spec.js b/spec/frontend/user_popovers_spec.js
index 745b66fd700..65137330ac3 100644
--- a/spec/frontend/user_popovers_spec.js
+++ b/spec/frontend/user_popovers_spec.js
@@ -1,5 +1,13 @@
+import { within } from '@testing-library/dom';
+
import UsersCache from '~/lib/utils/users_cache';
import initUserPopovers from '~/user_popovers';
+import waitForPromises from 'helpers/wait_for_promises';
+
+jest.mock('~/api/user_api', () => ({
+ followUser: jest.fn().mockResolvedValue({}),
+ unfollowUser: jest.fn().mockResolvedValue({}),
+}));
describe('User Popovers', () => {
const fixtureTemplate = 'merge_requests/merge_request_with_mentions.html';
@@ -19,7 +27,7 @@ describe('User Popovers', () => {
return link;
};
- const dummyUser = { name: 'root' };
+ const dummyUser = { name: 'root', username: 'root', is_followed: false };
const dummyUserStatus = { message: 'active' };
let popovers;
@@ -44,6 +52,13 @@ describe('User Popovers', () => {
jest
.spyOn(UsersCache, 'retrieveStatusById')
.mockImplementation((userId) => userStatusCacheSpy(userId));
+ jest.spyOn(UsersCache, 'updateById');
+
+ window.gon = {
+ features: {
+ followInUserPopover: true,
+ },
+ };
popovers = initUserPopovers(document.querySelectorAll(selector));
});
@@ -115,4 +130,32 @@ describe('User Popovers', () => {
expect(userLink.getAttribute('aria-describedby')).toBe(null);
});
+
+ it('updates toggle follow button and `UsersCache` when toggle follow button is clicked', async () => {
+ const [firstPopover] = popovers;
+ const withinFirstPopover = within(firstPopover.$el);
+ const findFollowButton = () => withinFirstPopover.queryByRole('button', { name: 'Follow' });
+ const findUnfollowButton = () => withinFirstPopover.queryByRole('button', { name: 'Unfollow' });
+
+ const userLink = document.querySelector(selector);
+ triggerEvent('mouseenter', userLink);
+
+ await waitForPromises();
+
+ const { userId } = document.querySelector(selector).dataset;
+
+ triggerEvent('click', findFollowButton());
+
+ await waitForPromises();
+
+ expect(findUnfollowButton()).not.toBe(null);
+ expect(UsersCache.updateById).toHaveBeenCalledWith(userId, { is_followed: true });
+
+ triggerEvent('click', findUnfollowButton());
+
+ await waitForPromises();
+
+ expect(findFollowButton()).not.toBe(null);
+ expect(UsersCache.updateById).toHaveBeenCalledWith(userId, { is_followed: false });
+ });
});
diff --git a/spec/frontend/vue_shared/components/user_popover/user_popover_spec.js b/spec/frontend/vue_shared/components/user_popover/user_popover_spec.js
index 3329199a46b..5595a44672a 100644
--- a/spec/frontend/vue_shared/components/user_popover/user_popover_spec.js
+++ b/spec/frontend/vue_shared/components/user_popover/user_popover_spec.js
@@ -3,9 +3,19 @@ import { mountExtended } from 'helpers/vue_test_utils_helper';
import { AVAILABILITY_STATUS } from '~/set_status_modal/utils';
import UserNameWithStatus from '~/sidebar/components/assignees/user_name_with_status.vue';
import UserPopover from '~/vue_shared/components/user_popover/user_popover.vue';
+import axios from '~/lib/utils/axios_utils';
+import createFlash from '~/flash';
+import { followUser, unfollowUser } from '~/api/user_api';
+
+jest.mock('~/flash');
+jest.mock('~/api/user_api', () => ({
+ followUser: jest.fn(),
+ unfollowUser: jest.fn(),
+}));
const DEFAULT_PROPS = {
user: {
+ id: 1,
username: 'root',
name: 'Administrator',
location: 'Vienna',
@@ -15,6 +25,7 @@ const DEFAULT_PROPS = {
workInformation: null,
status: null,
pronouns: 'they/them',
+ isFollowed: false,
loaded: true,
},
};
@@ -26,6 +37,7 @@ describe('User Popover Component', () => {
beforeEach(() => {
loadFixtures(fixtureTemplate);
+ gon.features = {};
});
afterEach(() => {
@@ -37,15 +49,17 @@ describe('User Popover Component', () => {
const findUserName = () => wrapper.find(UserNameWithStatus);
const findSecurityBotDocsLink = () => wrapper.findByTestId('user-popover-bot-docs-link');
const findUserLocalTime = () => wrapper.findByTestId('user-popover-local-time');
+ const findToggleFollowButton = () => wrapper.findByTestId('toggle-follow-button');
+
+ const createWrapper = (props = {}, { followInUserPopover = true } = {}) => {
+ gon.features.followInUserPopover = followInUserPopover;
- const createWrapper = (props = {}, options = {}) => {
wrapper = mountExtended(UserPopover, {
propsData: {
...DEFAULT_PROPS,
target: findTarget(),
...props,
},
- ...options,
});
};
@@ -289,4 +303,134 @@ describe('User Popover Component', () => {
expect(findUserLocalTime().exists()).toBe(false);
});
});
+
+ describe('follow actions with `followInUserPopover` flag enabled', () => {
+ describe("when current user doesn't follow the user", () => {
+ beforeEach(() => createWrapper());
+
+ it('renders the Follow button with the correct variant', () => {
+ expect(findToggleFollowButton().text()).toBe('Follow');
+ expect(findToggleFollowButton().props('variant')).toBe('confirm');
+ });
+
+ describe('when clicking', () => {
+ it('follows the user', async () => {
+ followUser.mockResolvedValue({});
+
+ await findToggleFollowButton().trigger('click');
+
+ expect(findToggleFollowButton().props('loading')).toBe(true);
+
+ await axios.waitForAll();
+
+ expect(wrapper.emitted().follow.length).toBe(1);
+ expect(wrapper.emitted().unfollow).toBeFalsy();
+ });
+
+ describe('when an error occurs', () => {
+ beforeEach(() => {
+ followUser.mockRejectedValue({});
+
+ findToggleFollowButton().trigger('click');
+ });
+
+ it('shows an error message', async () => {
+ await axios.waitForAll();
+
+ expect(createFlash).toHaveBeenCalledWith({
+ message: 'An error occurred while trying to follow this user, please try again.',
+ error: {},
+ captureError: true,
+ });
+ });
+
+ it('emits no events', async () => {
+ await axios.waitForAll();
+
+ expect(wrapper.emitted().follow).toBe(undefined);
+ expect(wrapper.emitted().unfollow).toBe(undefined);
+ });
+ });
+ });
+ });
+
+ describe('when current user follows the user', () => {
+ beforeEach(() => createWrapper({ user: { ...DEFAULT_PROPS.user, isFollowed: true } }));
+
+ it('renders the Unfollow button with the correct variant', () => {
+ expect(findToggleFollowButton().text()).toBe('Unfollow');
+ expect(findToggleFollowButton().props('variant')).toBe('default');
+ });
+
+ describe('when clicking', () => {
+ it('unfollows the user', async () => {
+ unfollowUser.mockResolvedValue({});
+
+ findToggleFollowButton().trigger('click');
+
+ await axios.waitForAll();
+
+ expect(wrapper.emitted().follow).toBe(undefined);
+ expect(wrapper.emitted().unfollow.length).toBe(1);
+ });
+
+ describe('when an error occurs', () => {
+ beforeEach(async () => {
+ unfollowUser.mockRejectedValue({});
+
+ findToggleFollowButton().trigger('click');
+
+ await axios.waitForAll();
+ });
+
+ it('shows an error message', () => {
+ expect(createFlash).toHaveBeenCalledWith({
+ message: 'An error occurred while trying to unfollow this user, please try again.',
+ error: {},
+ captureError: true,
+ });
+ });
+
+ it('emits no events', () => {
+ expect(wrapper.emitted().follow).toBe(undefined);
+ expect(wrapper.emitted().unfollow).toBe(undefined);
+ });
+ });
+ });
+ });
+
+ describe('when the current user is the user', () => {
+ beforeEach(() => {
+ gon.current_username = DEFAULT_PROPS.user.username;
+ createWrapper();
+ });
+
+ it("doesn't render the toggle follow button", () => {
+ expect(findToggleFollowButton().exists()).toBe(false);
+ });
+ });
+
+ describe('when API does not support `isFollowed`', () => {
+ beforeEach(() => {
+ const user = {
+ ...DEFAULT_PROPS.user,
+ isFollowed: undefined,
+ };
+
+ createWrapper({ user });
+ });
+
+ it('does not render the toggle follow button', () => {
+ expect(findToggleFollowButton().exists()).toBe(false);
+ });
+ });
+ });
+
+ describe('follow actions with `followInUserPopover` flag disabled', () => {
+ beforeEach(() => createWrapper({}, { followInUserPopover: false }));
+
+ it('doesn’t render the toggle follow button', () => {
+ expect(findToggleFollowButton().exists()).toBe(false);
+ });
+ });
});
diff --git a/spec/lib/api/entities/user_spec.rb b/spec/lib/api/entities/user_spec.rb
index be5e8e8e8c2..407f2894f01 100644
--- a/spec/lib/api/entities/user_spec.rb
+++ b/spec/lib/api/entities/user_spec.rb
@@ -12,7 +12,40 @@ RSpec.describe API::Entities::User do
subject { entity.as_json }
it 'exposes correct attributes' do
- expect(subject).to include(:name, :bio, :location, :public_email, :skype, :linkedin, :twitter, :website_url, :organization, :job_title, :work_information, :pronouns)
+ expect(subject.keys).to contain_exactly(
+ # UserSafe
+ :id, :username, :name,
+ # UserBasic
+ :state, :avatar_url, :web_url,
+ # User
+ :created_at, :bio, :location, :public_email, :skype, :linkedin, :twitter,
+ :website_url, :organization, :job_title, :pronouns, :bot, :work_information,
+ :followers, :following, :is_followed, :local_time
+ )
+ end
+
+ context 'exposing follow relationships' do
+ before do
+ allow(Ability).to receive(:allowed?).with(current_user, :read_user_profile, user).and_return(can_read_user_profile)
+ end
+
+ %i(followers following is_followed).each do |relationship|
+ context 'when current user cannot read user profile' do
+ let(:can_read_user_profile) { false }
+
+ it "does not expose #{relationship}" do
+ expect(subject).not_to include(relationship)
+ end
+ end
+
+ context 'when current user can read user profile' do
+ let(:can_read_user_profile) { true }
+
+ it "exposes #{relationship}" do
+ expect(subject).to include(relationship)
+ end
+ end
+ end
end
it 'exposes created_at if the current user can read the user profile' do
@@ -135,6 +168,16 @@ RSpec.describe API::Entities::User do
end
end
+ context 'with logged-out user' do
+ let(:current_user) { nil }
+
+ it 'exposes is_followed as nil' do
+ allow(Ability).to receive(:allowed?).with(current_user, :read_user_profile, user).and_return(true)
+
+ expect(subject.keys).not_to include(:is_followed)
+ end
+ end
+
it 'exposes local_time' do
local_time = '2:30 PM'
expect(entity).to receive(:local_time).with(timezone).and_return(local_time)
diff --git a/spec/lib/backup/manager_spec.rb b/spec/lib/backup/manager_spec.rb
index 17b2d571cfa..a5fd2690861 100644
--- a/spec/lib/backup/manager_spec.rb
+++ b/spec/lib/backup/manager_spec.rb
@@ -145,16 +145,11 @@ RSpec.describe Backup::Manager do
describe '#create' do
let(:incremental_env) { 'false' }
let(:expected_backup_contents) { %w{backup_information.yml task1.tar.gz task2.tar.gz} }
- let(:backup_id) { '1546300800_2019_01_01_12.3' }
+ let(:backup_time) { Time.utc(2019, 1, 1) }
+ let(:backup_id) { "1546300800_2019_01_01_#{Gitlab::VERSION}" }
let(:pack_tar_file) { "#{backup_id}_gitlab_backup.tar" }
let(:pack_tar_system_options) { { out: [pack_tar_file, 'w', Gitlab.config.backup.archive_permissions] } }
let(:pack_tar_cmdline) { ['tar', '-cf', '-', *expected_backup_contents, pack_tar_system_options] }
- let(:backup_information) do
- {
- backup_created_at: Time.zone.parse('2019-01-01'),
- gitlab_version: '12.3'
- }
- end
let(:task1) { instance_double(Backup::Task) }
let(:task2) { instance_double(Backup::Task) }
@@ -170,427 +165,429 @@ RSpec.describe Backup::Manager do
allow(ActiveRecord::Base.connection).to receive(:reconnect!)
allow(Gitlab::BackupLogger).to receive(:info)
allow(Kernel).to receive(:system).and_return(true)
- allow(YAML).to receive(:load_file).and_call_original
- allow(YAML).to receive(:load_file).with(File.join(Gitlab.config.backup.path, 'backup_information.yml'))
- .and_return(backup_information)
- allow(subject).to receive(:backup_information).and_return(backup_information)
allow(task1).to receive(:dump).with(File.join(Gitlab.config.backup.path, 'task1.tar.gz'), backup_id)
allow(task2).to receive(:dump).with(File.join(Gitlab.config.backup.path, 'task2.tar.gz'), backup_id)
end
it 'executes tar' do
- subject.create # rubocop:disable Rails/SaveBang
-
- expect(Kernel).to have_received(:system).with(*pack_tar_cmdline)
- end
-
- context 'tar fails' do
- before do
- expect(Kernel).to receive(:system).with(*pack_tar_cmdline).and_return(false)
- end
-
- it 'logs a failure' do
- expect do
- subject.create # rubocop:disable Rails/SaveBang
- end.to raise_error(Backup::Error, 'Backup failed')
+ travel_to(backup_time) do
+ subject.create # rubocop:disable Rails/SaveBang
- expect(Gitlab::BackupLogger).to have_received(:info).with(message: "Creating archive #{pack_tar_file} failed")
+ expect(Kernel).to have_received(:system).with(*pack_tar_cmdline)
end
end
context 'when BACKUP is set' do
let(:backup_id) { 'custom' }
- it 'uses the given value as tar file name' do
+ before do
stub_env('BACKUP', '/ignored/path/custom')
- subject.create # rubocop:disable Rails/SaveBang
-
- expect(Kernel).to have_received(:system).with(*pack_tar_cmdline)
end
- end
- context 'when skipped is set in backup_information.yml' do
- let(:expected_backup_contents) { %w{backup_information.yml task1.tar.gz} }
- let(:backup_information) do
- {
- backup_created_at: Time.zone.parse('2019-01-01'),
- gitlab_version: '12.3',
- skipped: ['task2']
- }
- end
-
- it 'executes tar' do
+ it 'uses the given value as tar file name' do
subject.create # rubocop:disable Rails/SaveBang
expect(Kernel).to have_received(:system).with(*pack_tar_cmdline)
end
- end
-
- context 'when SKIP env is set' do
- let(:expected_backup_contents) { %w{backup_information.yml task1.tar.gz} }
- before do
- stub_env('SKIP', 'task2')
- end
-
- it 'executes tar' do
- subject.create # rubocop:disable Rails/SaveBang
+ context 'tar fails' do
+ before do
+ expect(Kernel).to receive(:system).with(*pack_tar_cmdline).and_return(false)
+ end
- expect(Kernel).to have_received(:system).with(*pack_tar_cmdline)
- end
- end
+ it 'logs a failure' do
+ expect do
+ subject.create # rubocop:disable Rails/SaveBang
+ end.to raise_error(Backup::Error, 'Backup failed')
- context 'when the destination is optional' do
- let(:expected_backup_contents) { %w{backup_information.yml task1.tar.gz} }
- let(:definitions) do
- {
- 'task1' => Backup::Manager::TaskDefinition.new(task: task1, destination_path: 'task1.tar.gz'),
- 'task2' => Backup::Manager::TaskDefinition.new(task: task2, destination_path: 'task2.tar.gz', destination_optional: true)
- }
+ expect(Gitlab::BackupLogger).to have_received(:info).with(message: "Creating archive #{pack_tar_file} failed")
+ end
end
- it 'executes tar' do
- expect(File).to receive(:exist?).with(File.join(Gitlab.config.backup.path, 'task2.tar.gz')).and_return(false)
+ context 'when SKIP env is set' do
+ let(:expected_backup_contents) { %w{backup_information.yml task1.tar.gz} }
- subject.create # rubocop:disable Rails/SaveBang
+ before do
+ stub_env('SKIP', 'task2')
+ end
- expect(Kernel).to have_received(:system).with(*pack_tar_cmdline)
- end
- end
+ it 'executes tar' do
+ subject.create # rubocop:disable Rails/SaveBang
- context 'many backup files' do
- let(:files) do
- [
- '1451606400_2016_01_01_1.2.3_gitlab_backup.tar',
- '1451520000_2015_12_31_4.5.6_gitlab_backup.tar',
- '1451520000_2015_12_31_4.5.6-pre_gitlab_backup.tar',
- '1451520000_2015_12_31_4.5.6-rc1_gitlab_backup.tar',
- '1451520000_2015_12_31_4.5.6-pre-ee_gitlab_backup.tar',
- '1451510000_2015_12_30_gitlab_backup.tar',
- '1450742400_2015_12_22_gitlab_backup.tar',
- '1449878400_gitlab_backup.tar',
- '1449014400_gitlab_backup.tar',
- 'manual_gitlab_backup.tar'
- ]
+ expect(Kernel).to have_received(:system).with(*pack_tar_cmdline)
+ end
end
- before do
- allow(Gitlab::BackupLogger).to receive(:info)
- allow(Dir).to receive(:chdir).and_yield
- allow(Dir).to receive(:glob).and_return(files)
- allow(FileUtils).to receive(:rm)
- allow(Time).to receive(:now).and_return(Time.utc(2016))
- end
+ context 'when the destination is optional' do
+ let(:expected_backup_contents) { %w{backup_information.yml task1.tar.gz} }
+ let(:definitions) do
+ {
+ 'task1' => Backup::Manager::TaskDefinition.new(task: task1, destination_path: 'task1.tar.gz'),
+ 'task2' => Backup::Manager::TaskDefinition.new(task: task2, destination_path: 'task2.tar.gz', destination_optional: true)
+ }
+ end
- context 'when keep_time is zero' do
- before do
- allow(Gitlab.config.backup).to receive(:keep_time).and_return(0)
+ it 'executes tar' do
+ expect(File).to receive(:exist?).with(File.join(Gitlab.config.backup.path, 'task2.tar.gz')).and_return(false)
subject.create # rubocop:disable Rails/SaveBang
- end
- it 'removes no files' do
- expect(FileUtils).not_to have_received(:rm)
- end
-
- it 'prints a skipped message' do
- expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting old backups ... [SKIPPED]')
+ expect(Kernel).to have_received(:system).with(*pack_tar_cmdline)
end
end
- context 'when no valid file is found' do
+ context 'many backup files' do
let(:files) do
[
- '14516064000_2016_01_01_1.2.3_gitlab_backup.tar',
- 'foo_1451520000_2015_12_31_4.5.6_gitlab_backup.tar',
- '1451520000_2015_12_31_4.5.6-foo_gitlab_backup.tar'
+ '1451606400_2016_01_01_1.2.3_gitlab_backup.tar',
+ '1451520000_2015_12_31_4.5.6_gitlab_backup.tar',
+ '1451520000_2015_12_31_4.5.6-pre_gitlab_backup.tar',
+ '1451520000_2015_12_31_4.5.6-rc1_gitlab_backup.tar',
+ '1451520000_2015_12_31_4.5.6-pre-ee_gitlab_backup.tar',
+ '1451510000_2015_12_30_gitlab_backup.tar',
+ '1450742400_2015_12_22_gitlab_backup.tar',
+ '1449878400_gitlab_backup.tar',
+ '1449014400_gitlab_backup.tar',
+ 'manual_gitlab_backup.tar'
]
end
before do
- allow(Gitlab.config.backup).to receive(:keep_time).and_return(1)
-
- subject.create # rubocop:disable Rails/SaveBang
- end
-
- it 'removes no files' do
- expect(FileUtils).not_to have_received(:rm)
- end
-
- it 'prints a done message' do
- expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting old backups ... done. (0 removed)')
+ allow(Gitlab::BackupLogger).to receive(:info)
+ allow(Dir).to receive(:chdir).and_yield
+ allow(Dir).to receive(:glob).and_return(files)
+ allow(FileUtils).to receive(:rm)
+ allow(Time).to receive(:now).and_return(Time.utc(2016))
end
- end
- context 'when there are no files older than keep_time' do
- before do
- # Set to 30 days
- allow(Gitlab.config.backup).to receive(:keep_time).and_return(2592000)
+ context 'when keep_time is zero' do
+ before do
+ allow(Gitlab.config.backup).to receive(:keep_time).and_return(0)
- subject.create # rubocop:disable Rails/SaveBang
- end
+ subject.create # rubocop:disable Rails/SaveBang
+ end
- it 'removes no files' do
- expect(FileUtils).not_to have_received(:rm)
- end
+ it 'removes no files' do
+ expect(FileUtils).not_to have_received(:rm)
+ end
- it 'prints a done message' do
- expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting old backups ... done. (0 removed)')
+ it 'prints a skipped message' do
+ expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting old backups ... [SKIPPED]')
+ end
end
- end
- context 'when keep_time is set to remove files' do
- before do
- # Set to 1 second
- allow(Gitlab.config.backup).to receive(:keep_time).and_return(1)
+ context 'when no valid file is found' do
+ let(:files) do
+ [
+ '14516064000_2016_01_01_1.2.3_gitlab_backup.tar',
+ 'foo_1451520000_2015_12_31_4.5.6_gitlab_backup.tar',
+ '1451520000_2015_12_31_4.5.6-foo_gitlab_backup.tar'
+ ]
+ end
- subject.create # rubocop:disable Rails/SaveBang
- end
+ before do
+ allow(Gitlab.config.backup).to receive(:keep_time).and_return(1)
- it 'removes matching files with a human-readable versioned timestamp' do
- expect(FileUtils).to have_received(:rm).with(files[1])
- expect(FileUtils).to have_received(:rm).with(files[2])
- expect(FileUtils).to have_received(:rm).with(files[3])
- end
+ subject.create # rubocop:disable Rails/SaveBang
+ end
- it 'removes matching files with a human-readable versioned timestamp with tagged EE' do
- expect(FileUtils).to have_received(:rm).with(files[4])
- end
+ it 'removes no files' do
+ expect(FileUtils).not_to have_received(:rm)
+ end
- it 'removes matching files with a human-readable non-versioned timestamp' do
- expect(FileUtils).to have_received(:rm).with(files[5])
- expect(FileUtils).to have_received(:rm).with(files[6])
+ it 'prints a done message' do
+ expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting old backups ... done. (0 removed)')
+ end
end
- it 'removes matching files without a human-readable timestamp' do
- expect(FileUtils).to have_received(:rm).with(files[7])
- expect(FileUtils).to have_received(:rm).with(files[8])
- end
+ context 'when there are no files older than keep_time' do
+ before do
+ # Set to 30 days
+ allow(Gitlab.config.backup).to receive(:keep_time).and_return(2592000)
- it 'does not remove files that are not old enough' do
- expect(FileUtils).not_to have_received(:rm).with(files[0])
- end
+ subject.create # rubocop:disable Rails/SaveBang
+ end
- it 'does not remove non-matching files' do
- expect(FileUtils).not_to have_received(:rm).with(files[9])
- end
+ it 'removes no files' do
+ expect(FileUtils).not_to have_received(:rm)
+ end
- it 'prints a done message' do
- expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting old backups ... done. (8 removed)')
+ it 'prints a done message' do
+ expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting old backups ... done. (0 removed)')
+ end
end
- end
-
- context 'when removing a file fails' do
- let(:file) { files[1] }
- let(:message) { "Permission denied @ unlink_internal - #{file}" }
-
- before do
- allow(Gitlab.config.backup).to receive(:keep_time).and_return(1)
- allow(FileUtils).to receive(:rm).with(file).and_raise(Errno::EACCES, message)
- subject.create # rubocop:disable Rails/SaveBang
- end
+ context 'when keep_time is set to remove files' do
+ before do
+ # Set to 1 second
+ allow(Gitlab.config.backup).to receive(:keep_time).and_return(1)
- it 'removes the remaining expected files' do
- expect(FileUtils).to have_received(:rm).with(files[4])
- expect(FileUtils).to have_received(:rm).with(files[5])
- expect(FileUtils).to have_received(:rm).with(files[6])
- expect(FileUtils).to have_received(:rm).with(files[7])
- expect(FileUtils).to have_received(:rm).with(files[8])
- end
+ subject.create # rubocop:disable Rails/SaveBang
+ end
- it 'sets the correct removed count' do
- expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting old backups ... done. (7 removed)')
- end
+ it 'removes matching files with a human-readable versioned timestamp' do
+ expect(FileUtils).to have_received(:rm).with(files[1])
+ expect(FileUtils).to have_received(:rm).with(files[2])
+ expect(FileUtils).to have_received(:rm).with(files[3])
+ end
- it 'prints the error from file that could not be removed' do
- expect(Gitlab::BackupLogger).to have_received(:info).with(message: a_string_matching(message))
- end
- end
- end
+ it 'removes matching files with a human-readable versioned timestamp with tagged EE' do
+ expect(FileUtils).to have_received(:rm).with(files[4])
+ end
- describe 'cloud storage' do
- let(:backup_file) { Tempfile.new('backup', Gitlab.config.backup.path) }
- let(:backup_filename) { File.basename(backup_file.path) }
+ it 'removes matching files with a human-readable non-versioned timestamp' do
+ expect(FileUtils).to have_received(:rm).with(files[5])
+ expect(FileUtils).to have_received(:rm).with(files[6])
+ end
- before do
- allow(Gitlab::BackupLogger).to receive(:info)
- allow(subject).to receive(:tar_file).and_return(backup_filename)
-
- stub_backup_setting(
- upload: {
- connection: {
- provider: 'AWS',
- aws_access_key_id: 'id',
- aws_secret_access_key: 'secret'
- },
- remote_directory: 'directory',
- multipart_chunk_size: 104857600,
- encryption: nil,
- encryption_key: nil,
- storage_class: nil
- }
- )
+ it 'removes matching files without a human-readable timestamp' do
+ expect(FileUtils).to have_received(:rm).with(files[7])
+ expect(FileUtils).to have_received(:rm).with(files[8])
+ end
- Fog.mock!
+ it 'does not remove files that are not old enough' do
+ expect(FileUtils).not_to have_received(:rm).with(files[0])
+ end
- # the Fog mock only knows about directories we create explicitly
- connection = ::Fog::Storage.new(Gitlab.config.backup.upload.connection.symbolize_keys)
- connection.directories.create(key: Gitlab.config.backup.upload.remote_directory) # rubocop:disable Rails/SaveBang
- end
+ it 'does not remove non-matching files' do
+ expect(FileUtils).not_to have_received(:rm).with(files[9])
+ end
- context 'skipped upload' do
- let(:backup_information) do
- {
- backup_created_at: Time.zone.parse('2019-01-01'),
- gitlab_version: '12.3',
- skipped: ['remote']
- }
+ it 'prints a done message' do
+ expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting old backups ... done. (8 removed)')
+ end
end
- it 'informs the user' do
- stub_env('SKIP', 'remote')
- subject.create # rubocop:disable Rails/SaveBang
+ context 'when removing a file fails' do
+ let(:file) { files[1] }
+ let(:message) { "Permission denied @ unlink_internal - #{file}" }
- expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Uploading backup archive to remote storage directory ... [SKIPPED]')
- end
- end
-
- context 'target path' do
- it 'uses the tar filename by default' do
- expect_any_instance_of(Fog::Collection).to receive(:create)
- .with(hash_including(key: backup_filename, public: false))
- .and_call_original
+ before do
+ allow(Gitlab.config.backup).to receive(:keep_time).and_return(1)
+ allow(FileUtils).to receive(:rm).with(file).and_raise(Errno::EACCES, message)
- subject.create # rubocop:disable Rails/SaveBang
- end
+ subject.create # rubocop:disable Rails/SaveBang
+ end
- it 'adds the DIRECTORY environment variable if present' do
- stub_env('DIRECTORY', 'daily')
+ it 'removes the remaining expected files' do
+ expect(FileUtils).to have_received(:rm).with(files[4])
+ expect(FileUtils).to have_received(:rm).with(files[5])
+ expect(FileUtils).to have_received(:rm).with(files[6])
+ expect(FileUtils).to have_received(:rm).with(files[7])
+ expect(FileUtils).to have_received(:rm).with(files[8])
+ end
- expect_any_instance_of(Fog::Collection).to receive(:create)
- .with(hash_including(key: "daily/#{backup_filename}", public: false))
- .and_call_original
+ it 'sets the correct removed count' do
+ expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Deleting old backups ... done. (7 removed)')
+ end
- subject.create # rubocop:disable Rails/SaveBang
+ it 'prints the error from file that could not be removed' do
+ expect(Gitlab::BackupLogger).to have_received(:info).with(message: a_string_matching(message))
+ end
end
end
- context 'with AWS with server side encryption' do
- let(:connection) { ::Fog::Storage.new(Gitlab.config.backup.upload.connection.symbolize_keys) }
- let(:encryption_key) { nil }
- let(:encryption) { nil }
- let(:storage_options) { nil }
+ describe 'cloud storage' do
+ let(:backup_file) { Tempfile.new('backup', Gitlab.config.backup.path) }
+ let(:backup_filename) { File.basename(backup_file.path) }
before do
+ allow(Gitlab::BackupLogger).to receive(:info)
+ allow(subject).to receive(:tar_file).and_return(backup_filename)
+
stub_backup_setting(
upload: {
connection: {
provider: 'AWS',
- aws_access_key_id: 'AWS_ACCESS_KEY_ID',
- aws_secret_access_key: 'AWS_SECRET_ACCESS_KEY'
+ aws_access_key_id: 'id',
+ aws_secret_access_key: 'secret'
},
remote_directory: 'directory',
- multipart_chunk_size: Gitlab.config.backup.upload.multipart_chunk_size,
- encryption: encryption,
- encryption_key: encryption_key,
- storage_options: storage_options,
+ multipart_chunk_size: 104857600,
+ encryption: nil,
+ encryption_key: nil,
storage_class: nil
}
)
+ Fog.mock!
+
+ # the Fog mock only knows about directories we create explicitly
+ connection = ::Fog::Storage.new(Gitlab.config.backup.upload.connection.symbolize_keys)
connection.directories.create(key: Gitlab.config.backup.upload.remote_directory) # rubocop:disable Rails/SaveBang
end
- context 'with SSE-S3 without using storage_options' do
- let(:encryption) { 'AES256' }
+ context 'skipped upload' do
+ let(:backup_information) do
+ {
+ backup_created_at: Time.zone.parse('2019-01-01'),
+ gitlab_version: '12.3',
+ skipped: ['remote']
+ }
+ end
- it 'sets encryption attributes' do
+ it 'informs the user' do
+ stub_env('SKIP', 'remote')
subject.create # rubocop:disable Rails/SaveBang
- expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Uploading backup archive to remote storage directory ... done (encrypted with AES256)')
+ expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Uploading backup archive to remote storage directory ... [SKIPPED]')
end
end
- context 'with SSE-C (customer-provided keys) options' do
- let(:encryption) { 'AES256' }
- let(:encryption_key) { SecureRandom.hex }
+ context 'target path' do
+ it 'uses the tar filename by default' do
+ expect_any_instance_of(Fog::Collection).to receive(:create)
+ .with(hash_including(key: backup_filename, public: false))
+ .and_call_original
- it 'sets encryption attributes' do
subject.create # rubocop:disable Rails/SaveBang
+ end
+
+ it 'adds the DIRECTORY environment variable if present' do
+ stub_env('DIRECTORY', 'daily')
+
+ expect_any_instance_of(Fog::Collection).to receive(:create)
+ .with(hash_including(key: "daily/#{backup_filename}", public: false))
+ .and_call_original
- expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Uploading backup archive to remote storage directory ... done (encrypted with AES256)')
+ subject.create # rubocop:disable Rails/SaveBang
end
end
- context 'with SSE-KMS options' do
- let(:storage_options) do
- {
- server_side_encryption: 'aws:kms',
- server_side_encryption_kms_key_id: 'arn:aws:kms:12345'
- }
+ context 'with AWS with server side encryption' do
+ let(:connection) { ::Fog::Storage.new(Gitlab.config.backup.upload.connection.symbolize_keys) }
+ let(:encryption_key) { nil }
+ let(:encryption) { nil }
+ let(:storage_options) { nil }
+
+ before do
+ stub_backup_setting(
+ upload: {
+ connection: {
+ provider: 'AWS',
+ aws_access_key_id: 'AWS_ACCESS_KEY_ID',
+ aws_secret_access_key: 'AWS_SECRET_ACCESS_KEY'
+ },
+ remote_directory: 'directory',
+ multipart_chunk_size: Gitlab.config.backup.upload.multipart_chunk_size,
+ encryption: encryption,
+ encryption_key: encryption_key,
+ storage_options: storage_options,
+ storage_class: nil
+ }
+ )
+
+ connection.directories.create(key: Gitlab.config.backup.upload.remote_directory) # rubocop:disable Rails/SaveBang
end
- it 'sets encryption attributes' do
- subject.create # rubocop:disable Rails/SaveBang
+ context 'with SSE-S3 without using storage_options' do
+ let(:encryption) { 'AES256' }
- expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Uploading backup archive to remote storage directory ... done (encrypted with aws:kms)')
+ it 'sets encryption attributes' do
+ subject.create # rubocop:disable Rails/SaveBang
+
+ expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Uploading backup archive to remote storage directory ... done (encrypted with AES256)')
+ end
+ end
+
+ context 'with SSE-C (customer-provided keys) options' do
+ let(:encryption) { 'AES256' }
+ let(:encryption_key) { SecureRandom.hex }
+
+ it 'sets encryption attributes' do
+ subject.create # rubocop:disable Rails/SaveBang
+
+ expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Uploading backup archive to remote storage directory ... done (encrypted with AES256)')
+ end
+ end
+
+ context 'with SSE-KMS options' do
+ let(:storage_options) do
+ {
+ server_side_encryption: 'aws:kms',
+ server_side_encryption_kms_key_id: 'arn:aws:kms:12345'
+ }
+ end
+
+ it 'sets encryption attributes' do
+ subject.create # rubocop:disable Rails/SaveBang
+
+ expect(Gitlab::BackupLogger).to have_received(:info).with(message: 'Uploading backup archive to remote storage directory ... done (encrypted with aws:kms)')
+ end
end
end
- end
- context 'with Google provider' do
- before do
- stub_backup_setting(
- upload: {
- connection: {
- provider: 'Google',
- google_storage_access_key_id: 'test-access-id',
- google_storage_secret_access_key: 'secret'
- },
- remote_directory: 'directory',
- multipart_chunk_size: Gitlab.config.backup.upload.multipart_chunk_size,
- encryption: nil,
- encryption_key: nil,
- storage_class: nil
- }
- )
+ context 'with Google provider' do
+ before do
+ stub_backup_setting(
+ upload: {
+ connection: {
+ provider: 'Google',
+ google_storage_access_key_id: 'test-access-id',
+ google_storage_secret_access_key: 'secret'
+ },
+ remote_directory: 'directory',
+ multipart_chunk_size: Gitlab.config.backup.upload.multipart_chunk_size,
+ encryption: nil,
+ encryption_key: nil,
+ storage_class: nil
+ }
+ )
+
+ connection = ::Fog::Storage.new(Gitlab.config.backup.upload.connection.symbolize_keys)
+ connection.directories.create(key: Gitlab.config.backup.upload.remote_directory) # rubocop:disable Rails/SaveBang
+ end
- connection = ::Fog::Storage.new(Gitlab.config.backup.upload.connection.symbolize_keys)
- connection.directories.create(key: Gitlab.config.backup.upload.remote_directory) # rubocop:disable Rails/SaveBang
+ it 'does not attempt to set ACL' do
+ expect_any_instance_of(Fog::Collection).to receive(:create)
+ .with(hash_excluding(public: false))
+ .and_call_original
+
+ subject.create # rubocop:disable Rails/SaveBang
+ end
end
- it 'does not attempt to set ACL' do
- expect_any_instance_of(Fog::Collection).to receive(:create)
- .with(hash_excluding(public: false))
- .and_call_original
+ context 'with AzureRM provider' do
+ before do
+ stub_backup_setting(
+ upload: {
+ connection: {
+ provider: 'AzureRM',
+ azure_storage_account_name: 'test-access-id',
+ azure_storage_access_key: 'secret'
+ },
+ remote_directory: 'directory',
+ multipart_chunk_size: nil,
+ encryption: nil,
+ encryption_key: nil,
+ storage_class: nil
+ }
+ )
+ end
- subject.create # rubocop:disable Rails/SaveBang
+ it 'loads the provider' do
+ expect { subject.create }.not_to raise_error # rubocop:disable Rails/SaveBang
+ end
end
end
+ end
- context 'with AzureRM provider' do
- before do
- stub_backup_setting(
- upload: {
- connection: {
- provider: 'AzureRM',
- azure_storage_account_name: 'test-access-id',
- azure_storage_access_key: 'secret'
- },
- remote_directory: 'directory',
- multipart_chunk_size: nil,
- encryption: nil,
- encryption_key: nil,
- storage_class: nil
- }
- )
- end
+ context 'tar skipped' do
+ before do
+ stub_env('SKIP', 'tar')
+ end
- it 'loads the provider' do
- expect { subject.create }.not_to raise_error # rubocop:disable Rails/SaveBang
+ after do
+ FileUtils.rm_rf(Dir.glob(File.join(Gitlab.config.backup.path, '*')), secure: true)
+ end
+
+ it 'creates a non-tarred backup' do
+ travel_to(backup_time) do
+ subject.create # rubocop:disable Rails/SaveBang
end
+
+ expect(Kernel).not_to have_received(:system).with(*pack_tar_cmdline)
end
end
@@ -602,11 +599,17 @@ RSpec.describe Backup::Manager do
let(:unpack_tar_cmdline) { ['tar', '-xf', pack_tar_file] }
let(:backup_information) do
{
- backup_created_at: Time.zone.parse('2019-01-01'),
+ backup_created_at: backup_time,
gitlab_version: gitlab_version
}
end
+ before do
+ allow(YAML).to receive(:load_file).and_call_original
+ allow(YAML).to receive(:load_file).with(File.join(Gitlab.config.backup.path, 'backup_information.yml'))
+ .and_return(backup_information)
+ end
+
context 'when there are no backup files in the directory' do
before do
allow(Dir).to receive(:glob).and_return([])
diff --git a/spec/migrations/20210918201050_remove_old_pending_jobs_for_recalculate_vulnerabilities_occurrences_uuid_spec.rb b/spec/migrations/20210918201050_remove_old_pending_jobs_for_recalculate_vulnerabilities_occurrences_uuid_spec.rb
index 9addaaf2551..d1c04c5d320 100644
--- a/spec/migrations/20210918201050_remove_old_pending_jobs_for_recalculate_vulnerabilities_occurrences_uuid_spec.rb
+++ b/spec/migrations/20210918201050_remove_old_pending_jobs_for_recalculate_vulnerabilities_occurrences_uuid_spec.rb
@@ -1,6 +1,7 @@
# frozen_string_literal: true
+
require 'spec_helper'
-require Rails.root.join('db', 'post_migrate', '20210918201050_remove_old_pending_jobs_for_recalculate_vulnerabilities_occurrences_uuid.rb')
+require_migration!
def create_background_migration_jobs(ids, status, created_at)
proper_status = case status
diff --git a/spec/migrations/20220124130028_dedup_runner_projects_spec.rb b/spec/migrations/20220124130028_dedup_runner_projects_spec.rb
index 2698af6f6f5..127f4798f33 100644
--- a/spec/migrations/20220124130028_dedup_runner_projects_spec.rb
+++ b/spec/migrations/20220124130028_dedup_runner_projects_spec.rb
@@ -1,7 +1,7 @@
# frozen_string_literal: true
require 'spec_helper'
-require Rails.root.join('db', 'post_migrate', '20220124130028_dedup_runner_projects.rb')
+require_migration!
RSpec.describe DedupRunnerProjects, :migration, schema: 20220120085655 do
let(:namespaces) { table(:namespaces) }
diff --git a/spec/migrations/associate_existing_dast_builds_with_variables_spec.rb b/spec/migrations/associate_existing_dast_builds_with_variables_spec.rb
index ce0ab4223e8..74429e498df 100644
--- a/spec/migrations/associate_existing_dast_builds_with_variables_spec.rb
+++ b/spec/migrations/associate_existing_dast_builds_with_variables_spec.rb
@@ -1,7 +1,7 @@
# frozen_string_literal: true
require 'spec_helper'
-require Rails.root.join('db', 'migrate', '20210629031900_associate_existing_dast_builds_with_variables.rb')
+require_migration!
RSpec.describe AssociateExistingDastBuildsWithVariables do
subject(:migration) { described_class.new }
diff --git a/spec/migrations/backfill_cadence_id_for_boards_scoped_to_iteration_spec.rb b/spec/migrations/backfill_cadence_id_for_boards_scoped_to_iteration_spec.rb
index 1a64de8d0db..16a08ec47c4 100644
--- a/spec/migrations/backfill_cadence_id_for_boards_scoped_to_iteration_spec.rb
+++ b/spec/migrations/backfill_cadence_id_for_boards_scoped_to_iteration_spec.rb
@@ -2,7 +2,6 @@
require 'spec_helper'
require_migration!
-# require Rails.root.join('db', 'post_migrate', '20210825193652_backfill_candence_id_for_boards_scoped_to_iteration.rb')
RSpec.describe BackfillCadenceIdForBoardsScopedToIteration, :migration do
let(:projects) { table(:projects) }
diff --git a/spec/migrations/insert_ci_daily_pipeline_schedule_triggers_plan_limits_spec.rb b/spec/migrations/insert_ci_daily_pipeline_schedule_triggers_plan_limits_spec.rb
index 4b8d3641247..1b6cb6a86a0 100644
--- a/spec/migrations/insert_ci_daily_pipeline_schedule_triggers_plan_limits_spec.rb
+++ b/spec/migrations/insert_ci_daily_pipeline_schedule_triggers_plan_limits_spec.rb
@@ -1,7 +1,7 @@
# frozen_string_literal: true
require 'spec_helper'
-require Rails.root.join('db', 'migrate', '20210526190553_insert_ci_daily_pipeline_schedule_triggers_plan_limits.rb')
+require_migration!
RSpec.describe InsertCiDailyPipelineScheduleTriggersPlanLimits do
let_it_be(:plans) { table(:plans) }
diff --git a/spec/migrations/migrate_protected_attribute_to_pending_builds_spec.rb b/spec/migrations/migrate_protected_attribute_to_pending_builds_spec.rb
index e838476a650..2108adcc973 100644
--- a/spec/migrations/migrate_protected_attribute_to_pending_builds_spec.rb
+++ b/spec/migrations/migrate_protected_attribute_to_pending_builds_spec.rb
@@ -1,7 +1,7 @@
# frozen_string_literal: true
require 'spec_helper'
-require Rails.root.join('db', 'post_migrate', '20210610102413_migrate_protected_attribute_to_pending_builds.rb')
+require_migration!
RSpec.describe MigrateProtectedAttributeToPendingBuilds do
let(:namespaces) { table(:namespaces) }
diff --git a/spec/migrations/retry_backfill_traversal_ids_spec.rb b/spec/migrations/retry_backfill_traversal_ids_spec.rb
index e5ebd4228ca..910be9f2c69 100644
--- a/spec/migrations/retry_backfill_traversal_ids_spec.rb
+++ b/spec/migrations/retry_backfill_traversal_ids_spec.rb
@@ -1,7 +1,7 @@
# frozen_string_literal: true
require 'spec_helper'
-require Rails.root.join('db', 'post_migrate', '20210604070207_retry_backfill_traversal_ids.rb')
+require_migration!
RSpec.describe RetryBackfillTraversalIds, :migration do
include ReloadHelpers
diff --git a/spec/requests/lfs_http_spec.rb b/spec/requests/lfs_http_spec.rb
index 4b2f11da77e..acf83916f82 100644
--- a/spec/requests/lfs_http_spec.rb
+++ b/spec/requests/lfs_http_spec.rb
@@ -2,9 +2,12 @@
require 'spec_helper'
RSpec.describe 'Git LFS API and storage' do
+ using RSpec::Parameterized::TableSyntax
+
include LfsHttpHelpers
include ProjectForksHelper
include WorkhorseHelpers
+ include WorkhorseLfsHelpers
let_it_be(:project, reload: true) { create(:project, :empty_repo) }
let_it_be(:user) { create(:user) }
@@ -814,7 +817,23 @@ RSpec.describe 'Git LFS API and storage' do
context 'and request to finalize the upload is not sent by gitlab-workhorse' do
it 'fails with a JWT decode error' do
- expect { put_finalize(lfs_tmp_file, verified: false) }.to raise_error(JWT::DecodeError)
+ expect { put_finalize(verified: false) }.to raise_error(JWT::DecodeError)
+ end
+ end
+
+ context 'and the uploaded file is invalid' do
+ where(:size, :sha256, :status) do
+ nil | nil | :ok # Test setup sanity check
+ 0 | nil | :bad_request
+ nil | 'a' * 64 | :bad_request
+ end
+
+ with_them do
+ it 'validates the upload size and SHA256' do
+ put_finalize(size: size, sha256: sha256)
+
+ expect(response).to have_gitlab_http_status(status)
+ end
end
end
@@ -840,7 +859,7 @@ RSpec.describe 'Git LFS API and storage' do
let(:tmp_object) do
fog_connection.directories.new(key: 'lfs-objects').files.create( # rubocop: disable Rails/SaveBang
key: 'tmp/uploads/12312300',
- body: 'content'
+ body: 'x' * sample_size
)
end
@@ -1106,13 +1125,7 @@ RSpec.describe 'Git LFS API and storage' do
context 'when pushing the same LFS object to the second project' do
before do
- finalize_headers = headers
- .merge('X-Gitlab-Lfs-Tmp' => lfs_tmp_file)
- .merge(workhorse_internal_api_request_header)
-
- put objects_url(second_project, sample_oid, sample_size),
- params: {},
- headers: finalize_headers
+ put_finalize(with_tempfile: true, to_project: second_project)
end
it_behaves_like 'LFS http 200 response'
@@ -1130,38 +1143,6 @@ RSpec.describe 'Git LFS API and storage' do
put authorize_url(project, sample_oid, sample_size), params: {}, headers: authorize_headers
end
-
- def put_finalize(lfs_tmp = lfs_tmp_file, with_tempfile: false, verified: true, remote_object: nil, args: {})
- uploaded_file = nil
-
- if with_tempfile
- upload_path = LfsObjectUploader.workhorse_local_upload_path
- file_path = upload_path + '/' + lfs_tmp if lfs_tmp
-
- FileUtils.mkdir_p(upload_path)
- FileUtils.touch(file_path)
-
- uploaded_file = UploadedFile.new(file_path, filename: File.basename(file_path))
- elsif remote_object
- uploaded_file = fog_to_uploaded_file(remote_object)
- end
-
- finalize_headers = headers
- finalize_headers.merge!(workhorse_internal_api_request_header) if verified
-
- workhorse_finalize(
- objects_url(project, sample_oid, sample_size),
- method: :put,
- file_key: :file,
- params: args.merge(file: uploaded_file),
- headers: finalize_headers,
- send_rewritten_field: include_workhorse_jwt_header
- )
- end
-
- def lfs_tmp_file
- "#{sample_oid}012345678"
- end
end
end
end
diff --git a/spec/support/helpers/workhorse_helpers.rb b/spec/support/helpers/workhorse_helpers.rb
index 83bda6e03b1..6f22df9ae0f 100644
--- a/spec/support/helpers/workhorse_helpers.rb
+++ b/spec/support/helpers/workhorse_helpers.rb
@@ -114,16 +114,18 @@ module WorkhorseHelpers
end
params["#{key}.remote_id"] = file.remote_id if file.respond_to?(:remote_id) && file.remote_id.present?
+ params["#{key}.sha256"] = file.sha256 if file.respond_to?(:sha256) && file.sha256.present?
end
end
- def fog_to_uploaded_file(file)
+ def fog_to_uploaded_file(file, sha256: nil)
filename = File.basename(file.key)
UploadedFile.new(nil,
filename: filename,
remote_id: filename,
- size: file.content_length
+ size: file.content_length,
+ sha256: sha256
)
end
end
diff --git a/spec/support/helpers/workhorse_lfs_helpers.rb b/spec/support/helpers/workhorse_lfs_helpers.rb
new file mode 100644
index 00000000000..c9644826317
--- /dev/null
+++ b/spec/support/helpers/workhorse_lfs_helpers.rb
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+module WorkhorseLfsHelpers
+ extend self
+
+ def put_finalize(
+ lfs_tmp = nil, with_tempfile: false, verified: true, remote_object: nil,
+ args: {}, to_project: nil, size: nil, sha256: nil)
+
+ lfs_tmp ||= "#{sample_oid}012345678"
+ to_project ||= project
+ uploaded_file =
+ if with_tempfile
+ upload_path = LfsObjectUploader.workhorse_local_upload_path
+ file_path = upload_path + '/' + lfs_tmp
+
+ FileUtils.mkdir_p(upload_path)
+ FileUtils.touch(file_path)
+ File.truncate(file_path, sample_size)
+
+ UploadedFile.new(file_path, filename: File.basename(file_path), sha256: sample_oid)
+ elsif remote_object
+ fog_to_uploaded_file(remote_object, sha256: sample_oid)
+ else
+ UploadedFile.new(
+ nil,
+ size: size || sample_size,
+ sha256: sha256 || sample_oid,
+ remote_id: 'remote id'
+ )
+ end
+
+ finalize_headers = headers
+ finalize_headers.merge!(workhorse_internal_api_request_header) if verified
+
+ workhorse_finalize(
+ objects_url(to_project, sample_oid, sample_size),
+ method: :put,
+ file_key: :file,
+ params: args.merge(file: uploaded_file),
+ headers: finalize_headers,
+ send_rewritten_field: include_workhorse_jwt_header
+ )
+ end
+end
diff --git a/workhorse/.tool-versions b/workhorse/.tool-versions
index 108bdd0f6a5..c90984122a3 100644
--- a/workhorse/.tool-versions
+++ b/workhorse/.tool-versions
@@ -1 +1 @@
-golang 1.17.7
+golang 1.17.9