Create
client.Moderations.New(ctx, body) (*ModerationNewResponse, error)
post/moderations
Classifies if given messages are potentially harmful across several categories.
Parameters
body ModerationNewParams (fields: Messages, Model)
Returns
ModerationNewResponse (struct)
package main
import (
"context"
"fmt"
"github.com/stainless-sdks/-go"
"github.com/stainless-sdks/-go/option"
)
// main demonstrates calling the Moderations endpoint: it classifies a
// single user message and prints the model that produced the result.
func main() {
	// The client reads its configuration from options; here only the API
	// key is set explicitly.
	client := llamaapi.NewClient(
		option.WithAPIKey("My API Key"),
	)
	// Submit one user message for moderation. The inner element type is
	// elided from the slice literal (gofmt -s form).
	moderation, err := client.Moderations.New(context.TODO(), llamaapi.ModerationNewParams{
		Messages: []llamaapi.MessageUnionParam{{
			OfUser: &llamaapi.UserMessageParam{
				Content: llamaapi.UserMessageContentUnionParam{
					OfString: llamaapi.String("string"),
				},
				Role: llamaapi.UserMessageRoleUser,
			},
		}},
	})
	if err != nil {
		// Example program: abort on any transport or API error.
		panic(err.Error())
	}
	fmt.Printf("%+v\n", moderation.Model)
}
200 Example
{
"model": "model",
"results": [
{
"flagged": true,
"flagged_categories": [
"string"
]
}
]
}