diff --git a/moderation.go b/moderation.go
index f8d20ee..45d0524 100644
--- a/moderation.go
+++ b/moderation.go
@@ -44,24 +44,32 @@ type Result struct {
 
 // ResultCategories represents Categories of Result.
 type ResultCategories struct {
-	Hate            bool `json:"hate"`
-	HateThreatening bool `json:"hate/threatening"`
-	SelfHarm        bool `json:"self-harm"`
-	Sexual          bool `json:"sexual"`
-	SexualMinors    bool `json:"sexual/minors"`
-	Violence        bool `json:"violence"`
-	ViolenceGraphic bool `json:"violence/graphic"`
+	Hate                  bool `json:"hate"`
+	HateThreatening       bool `json:"hate/threatening"`
+	Harassment            bool `json:"harassment"`
+	HarassmentThreatening bool `json:"harassment/threatening"`
+	SelfHarm              bool `json:"self-harm"`
+	SelfHarmIntent        bool `json:"self-harm/intent"`
+	SelfHarmInstructions  bool `json:"self-harm/instructions"`
+	Sexual                bool `json:"sexual"`
+	SexualMinors          bool `json:"sexual/minors"`
+	Violence              bool `json:"violence"`
+	ViolenceGraphic       bool `json:"violence/graphic"`
 }
 
 // ResultCategoryScores represents CategoryScores of Result.
 type ResultCategoryScores struct {
-	Hate            float32 `json:"hate"`
-	HateThreatening float32 `json:"hate/threatening"`
-	SelfHarm        float32 `json:"self-harm"`
-	Sexual          float32 `json:"sexual"`
-	SexualMinors    float32 `json:"sexual/minors"`
-	Violence        float32 `json:"violence"`
-	ViolenceGraphic float32 `json:"violence/graphic"`
+	Hate                  float32 `json:"hate"`
+	HateThreatening       float32 `json:"hate/threatening"`
+	Harassment            float32 `json:"harassment"`
+	HarassmentThreatening float32 `json:"harassment/threatening"`
+	SelfHarm              float32 `json:"self-harm"`
+	SelfHarmIntent        float32 `json:"self-harm/intent"`
+	SelfHarmInstructions  float32 `json:"self-harm/instructions"`
+	Sexual                float32 `json:"sexual"`
+	SexualMinors          float32 `json:"sexual/minors"`
+	Violence              float32 `json:"violence"`
+	ViolenceGraphic       float32 `json:"violence/graphic"`
 }
 
 // ModerationResponse represents a response structure for moderation API.
diff --git a/moderation_test.go b/moderation_test.go
index 059f0d1..7fdeb9b 100644
--- a/moderation_test.go
+++ b/moderation_test.go
@@ -80,18 +80,49 @@ func handleModerationEndpoint(w http.ResponseWriter, r *http.Request) {
 	resCat := openai.ResultCategories{}
 	resCatScore := openai.ResultCategoryScores{}
 	switch {
-	case strings.Contains(moderationReq.Input, "kill"):
-		resCat = openai.ResultCategories{Violence: true}
-		resCatScore = openai.ResultCategoryScores{Violence: 1}
+	case strings.Contains(moderationReq.Input, "hate more"):
+		resCat = openai.ResultCategories{HateThreatening: true}
+		resCatScore = openai.ResultCategoryScores{HateThreatening: 1}
+
 	case strings.Contains(moderationReq.Input, "hate"):
 		resCat = openai.ResultCategories{Hate: true}
 		resCatScore = openai.ResultCategoryScores{Hate: 1}
+
+	case strings.Contains(moderationReq.Input, "harass hard"):
+		resCat = openai.ResultCategories{HarassmentThreatening: true}
+		resCatScore = openai.ResultCategoryScores{HarassmentThreatening: 1}
+
+	case strings.Contains(moderationReq.Input, "harass"):
+		resCat = openai.ResultCategories{Harassment: true}
+		resCatScore = openai.ResultCategoryScores{Harassment: 1}
+
+	case strings.Contains(moderationReq.Input, "wanna suicide"):
+		resCat = openai.ResultCategories{SelfHarmIntent: true}
+		resCatScore = openai.ResultCategoryScores{SelfHarmIntent: 1}
+
+	case strings.Contains(moderationReq.Input, "drink bleach"):
+		resCat = openai.ResultCategories{SelfHarmInstructions: true}
+		resCatScore = openai.ResultCategoryScores{SelfHarmInstructions: 1}
+
 	case strings.Contains(moderationReq.Input, "suicide"):
 		resCat = openai.ResultCategories{SelfHarm: true}
 		resCatScore = openai.ResultCategoryScores{SelfHarm: 1}
+
+	case strings.Contains(moderationReq.Input, "child porn"):
+		resCat = openai.ResultCategories{SexualMinors: true}
+		resCatScore = openai.ResultCategoryScores{SexualMinors: 1}
+
 	case strings.Contains(moderationReq.Input, "porn"):
 		resCat = openai.ResultCategories{Sexual: true}
 		resCatScore = openai.ResultCategoryScores{Sexual: 1}
+
+	case strings.Contains(moderationReq.Input, "kill"):
+		resCat = openai.ResultCategories{Violence: true}
+		resCatScore = openai.ResultCategoryScores{Violence: 1}
+
+	case strings.Contains(moderationReq.Input, "corpse"):
+		resCat = openai.ResultCategories{ViolenceGraphic: true}
+		resCatScore = openai.ResultCategoryScores{ViolenceGraphic: 1}
 	}
 
 	result := openai.Result{Categories: resCat, CategoryScores: resCatScore, Flagged: true}
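For reference, a minimal caller-side sketch of the widened structs. It assumes this diff targets the sashabaranov/go-openai package (NewClient, Moderations, ModerationRequest, and the ModerationTextStable constant come from that module); the API key and input string are placeholders, not part of this change.

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	// Placeholder key; a real caller would load this from configuration.
	client := openai.NewClient("your-api-key")

	resp, err := client.Moderations(context.Background(), openai.ModerationRequest{
		Model: openai.ModerationTextStable,
		Input: "some user-supplied text",
	})
	if err != nil {
		fmt.Println("moderation request failed:", err)
		return
	}

	// One result is returned per input; Categories holds booleans,
	// CategoryScores holds float32 confidence values.
	r := resp.Results[0]
	fmt.Println("harassment flagged:", r.Categories.Harassment)
	fmt.Println("harassment score:", r.CategoryScores.Harassment)
	fmt.Println("self-harm/intent flagged:", r.Categories.SelfHarmIntent)
}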